// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

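/*
 * Return the slab that backs @addr, or NULL if @addr does not lie in the
 * kernel's linear mapping and therefore cannot belong to a slab.
 */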
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

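/*
 * Save the current stack trace into the stack depot and return a compact
 * handle for it. When @can_alloc is false, the depot is not allowed to
 * allocate new storage, so the call stays safe in contexts that must not
 * allocate (a new trace may then fail to be recorded).
 */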
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
}

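/* Record the current task's PID and stack trace in an alloc/free track. */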
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags, true);
}

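/*
 * Balanced annotations that adjust the current task's kasan_depth counter;
 * the software KASAN modes consult it to decide whether reports may be
 * produced in the current context.
 */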
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

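/* Mark the [address, address + size) range as accessible. */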
void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

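/*
 * Page allocator hook: unpoison 2^order freshly allocated pages. A single
 * random tag is generated (for the tag-based modes), saved in each page's
 * flags, and applied to the whole range; highmem pages are skipped.
 */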
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
}

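/* Page allocator hook: poison 2^order pages as free memory. */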
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

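/*
 * Poison a whole new slab as redzone; objects are unpoisoned individually
 * as they are allocated. The page tags are reset first so that untagged
 * accesses via page_address() do not trip the tag-based modes.
 */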
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator, we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB, assign tags based on the object's index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB, assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* The tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS. */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

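/*
 * Common slab free hook. Reports an invalid free (pointer not at an
 * object's start) or a double-free, poisons the object, and optionally
 * defers the actual free through the generic mode's quarantine. Returns
 * true if KASAN has taken ownership of the object and the slab allocator
 * must not free it now.
 */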
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				       unsigned long ip, bool quarantine,
				       bool init)
{
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_SLAB_FREE, init);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
		return false;

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
		       unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

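/*
 * Sanity-check a large (page_alloc fallback) free: the pointer must be the
 * start of the allocation and the memory must still be accessible.
 * Returns true if a bad free was detected and reported.
 */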
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_poison_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct folio *folio;

	folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc()- and
	 * kmalloc()-backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc() is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc() falls back onto page_alloc.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
	} else {
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}
}

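/*
 * Slab allocation hook: assign a tag to the object pointer, unpoison the
 * object, and save the allocation stack. For kmalloc() caches, precise
 * redzone poisoning and stack saving are deferred to __kasan_kmalloc().
 */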
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
				       void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign a random tag for the tag-based modes.
	 * The tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
		kasan_save_alloc_info(cache, tagged_object, flags);

	return tagged_object;
}

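/*
 * Common helper for kmalloc() and krealloc(): poison the redzone between
 * the requested @size and the end of the object.
 */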
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				      const void *object, size_t size,
				      gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
			       KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
		kasan_save_alloc_info(cache, (void *)object, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
				    size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

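/*
 * Hook for kmalloc() allocations that fall back onto page_alloc: poison
 * the redzone between the requested @size and the end of the backing
 * pages.
 */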
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
					  gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_unpoison_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

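/*
 * krealloc() hook: unpoison the new size of an existing allocation and
 * re-poison the redzone, handling both slab- and page_alloc-backed
 * objects.
 */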
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

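/*
 * Check that a single byte at @address is accessible, reporting a bad
 * access otherwise. Callers typically reach this through the
 * kasan_check_byte() wrapper; as an illustrative sketch, a function like
 * ksize() can reject an invalid pointer with:
 *
 *	if (!kasan_check_byte(objp))
 *		return 0;
 */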
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}