Lines matching +full:cache +full:- +full:size (mm/kasan/common.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * This file contains common generic and tag-based KASAN code.
8 * Some code borrowed from https://github.com/xairy/kasan-prototype by
55 track->pid = current->pid; in kasan_set_track()
56 track->stack = kasan_save_stack(flags); in kasan_set_track()
61 current->kasan_depth++; in kasan_enable_current()
66 current->kasan_depth--; in kasan_disable_current()
69 bool __kasan_check_read(const volatile void *p, unsigned int size) in __kasan_check_read() argument
71 return check_memory_region((unsigned long)p, size, false, _RET_IP_); in __kasan_check_read()
75 bool __kasan_check_write(const volatile void *p, unsigned int size) in __kasan_check_write() argument
77 return check_memory_region((unsigned long)p, size, true, _RET_IP_); in __kasan_check_write()
113 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
116 void kasan_poison_shadow(const void *address, size_t size, u8 value) in kasan_poison_shadow() argument
128 shadow_end = kasan_mem_to_shadow(address + size); in kasan_poison_shadow()
130 __memset(shadow_start, value, shadow_end - shadow_start); in kasan_poison_shadow()
133 void kasan_unpoison_shadow(const void *address, size_t size) in kasan_unpoison_shadow() argument
144 kasan_poison_shadow(address, size, tag); in kasan_unpoison_shadow()
146 if (size & KASAN_SHADOW_MASK) { in kasan_unpoison_shadow()
147 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); in kasan_unpoison_shadow()
152 *shadow = size & KASAN_SHADOW_MASK; in kasan_unpoison_shadow()
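The poison/unpoison fragments above rely on the standard KASAN encoding: each 8-byte granule of memory is tracked by one shadow byte, where 0 means the granule is fully accessible and a value 1..7 records how many leading bytes of a partially used granule are valid. The following minimal userspace sketch mimics that arithmetic; the 8-byte granule matches the generic-KASAN default, but the helper name and the standalone setting are illustrative assumptions, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SHADOW_SCALE_SHIFT 3                        /* 8 bytes of memory per shadow byte */
#define SHADOW_GRANULE     (1UL << SHADOW_SCALE_SHIFT)
#define SHADOW_MASK        (SHADOW_GRANULE - 1)

/*
 * Hypothetical stand-in for kasan_unpoison_shadow(): mark 'size' bytes
 * starting at a granule-aligned 'offset' as accessible in a local shadow
 * array (0 = fully valid granule, 1..7 = that many leading bytes valid,
 * 0xFF = fully poisoned in this sketch).
 */
static void unpoison(uint8_t *shadow, size_t offset, size_t size)
{
	size_t first = offset >> SHADOW_SCALE_SHIFT;
	size_t last  = (offset + size) >> SHADOW_SCALE_SHIFT;

	memset(shadow + first, 0, last - first);    /* whole granules */
	if (size & SHADOW_MASK)
		shadow[last] = size & SHADOW_MASK;  /* partial final granule */
}

int main(void)
{
	uint8_t shadow[4];

	memset(shadow, 0xFF, sizeof(shadow));       /* start fully poisoned */
	unpoison(shadow, 0, 13);                    /* one full granule + 5 bytes */
	printf("%u %u %u %u\n", shadow[0], shadow[1], shadow[2], shadow[3]);
	/* prints "0 5 255 255": shadow byte 1 records that only 5 of 8 bytes are valid */
	return 0;
}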
159 size_t size = sp - base; in __kasan_unpoison_stack() local
161 kasan_unpoison_shadow(base, size); in __kasan_unpoison_stack()
178 void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1)); in kasan_unpoison_task_stack_below()
180 kasan_unpoison_shadow(base, watermark - base); in kasan_unpoison_task_stack_below()
215 object_size <= 64 - 16 ? 16 : in optimal_redzone()
216 object_size <= 128 - 32 ? 32 : in optimal_redzone()
217 object_size <= 512 - 64 ? 64 : in optimal_redzone()
218 object_size <= 4096 - 128 ? 128 : in optimal_redzone()
219 object_size <= (1 << 14) - 256 ? 256 : in optimal_redzone()
220 object_size <= (1 << 15) - 512 ? 512 : in optimal_redzone()
221 object_size <= (1 << 16) - 1024 ? 1024 : 2048; in optimal_redzone()
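The ternary chain in optimal_redzone() is effectively a lookup table from object size to redzone width. Restated as a table in a standalone sketch (the function name and the userspace harness are assumptions; the thresholds are copied verbatim from the chain above):

#include <stdio.h>

/* Same size classes as optimal_redzone() above, restated as a table. */
static unsigned int redzone_for(unsigned int object_size)
{
	static const struct { unsigned int max_object, redzone; } classes[] = {
		{ 64 - 16,           16 },
		{ 128 - 32,          32 },
		{ 512 - 64,          64 },
		{ 4096 - 128,       128 },
		{ (1 << 14) - 256,  256 },
		{ (1 << 15) - 512,  512 },
		{ (1 << 16) - 1024, 1024 },
	};

	for (unsigned int i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		if (object_size <= classes[i].max_object)
			return classes[i].redzone;
	return 2048;
}

int main(void)
{
	printf("%u %u %u\n", redzone_for(32), redzone_for(1024), redzone_for(1 << 16));
	/* prints "16 128 2048" */
	return 0;
}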
224 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, in kasan_cache_create() argument
227 unsigned int orig_size = *size; in kasan_cache_create()
232 cache->kasan_info.alloc_meta_offset = *size; in kasan_cache_create()
233 *size += sizeof(struct kasan_alloc_meta); in kasan_cache_create()
237 (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor || in kasan_cache_create()
238 cache->object_size < sizeof(struct kasan_free_meta))) { in kasan_cache_create()
239 cache->kasan_info.free_meta_offset = *size; in kasan_cache_create()
240 *size += sizeof(struct kasan_free_meta); in kasan_cache_create()
243 redzone_size = optimal_redzone(cache->object_size); in kasan_cache_create()
244 redzone_adjust = redzone_size - (*size - cache->object_size); in kasan_cache_create()
246 *size += redzone_adjust; in kasan_cache_create()
248 *size = min_t(unsigned int, KMALLOC_MAX_SIZE, in kasan_cache_create()
249 max(*size, cache->object_size + redzone_size)); in kasan_cache_create()
254 if (*size <= cache->kasan_info.alloc_meta_offset || in kasan_cache_create()
255 *size <= cache->kasan_info.free_meta_offset) { in kasan_cache_create()
256 cache->kasan_info.alloc_meta_offset = 0; in kasan_cache_create()
257 cache->kasan_info.free_meta_offset = 0; in kasan_cache_create()
258 *size = orig_size; in kasan_cache_create()
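kasan_cache_create() grows the slab object footprint to make room for allocation metadata, optional free metadata, and a redzone, clamps the result to KMALLOC_MAX_SIZE, and falls back to the original size if the metadata no longer fits after clamping. The sketch below mimics that bookkeeping in userspace; the metadata sizes, the KMALLOC_MAX value, and the struct/function names are illustrative assumptions, not the kernel's.

#include <stdio.h>

/* Illustrative stand-ins; the real struct sizes and KMALLOC_MAX_SIZE differ. */
#define ALLOC_META_SIZE 32u
#define FREE_META_SIZE  16u
#define KMALLOC_MAX     (1u << 22)

struct layout {
	unsigned int alloc_meta_offset;
	unsigned int free_meta_offset;
	unsigned int size;              /* final per-object footprint */
};

/*
 * Sketch of the size computation in kasan_cache_create(): append alloc
 * metadata, optionally free metadata, grow to fit the redzone, clamp to
 * the kmalloc limit.  'redzone' would come from optimal_redzone().
 */
static struct layout cache_layout(unsigned int object_size,
				  int needs_free_meta, unsigned int redzone)
{
	struct layout l = { .size = object_size };
	unsigned int orig_size = object_size;
	int adjust;

	l.alloc_meta_offset = l.size;
	l.size += ALLOC_META_SIZE;

	if (needs_free_meta) {
		l.free_meta_offset = l.size;
		l.size += FREE_META_SIZE;
	}

	adjust = (int)redzone - (int)(l.size - object_size);
	if (adjust > 0)
		l.size += adjust;
	if (l.size < object_size + redzone)     /* mirrors the max() */
		l.size = object_size + redzone;
	if (l.size > KMALLOC_MAX)               /* mirrors the min_t() */
		l.size = KMALLOC_MAX;

	/* If clamping pushed the metadata past the end, give up on it. */
	if (l.size <= l.alloc_meta_offset ||
	    (needs_free_meta && l.size <= l.free_meta_offset)) {
		l.alloc_meta_offset = 0;
		l.free_meta_offset = 0;
		l.size = orig_size;
	}
	return l;
}

int main(void)
{
	struct layout l = cache_layout(128, 1, 64);

	printf("alloc@%u free@%u size=%u\n",
	       l.alloc_meta_offset, l.free_meta_offset, l.size);
	/* prints "alloc@128 free@160 size=192": 128-byte object plus 64 bytes
	 * of metadata/redzone */
	return 0;
}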
265 size_t kasan_metadata_size(struct kmem_cache *cache) in kasan_metadata_size() argument
267 return (cache->kasan_info.alloc_meta_offset ? in kasan_metadata_size()
269 (cache->kasan_info.free_meta_offset ? in kasan_metadata_size()
273 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache, in get_alloc_info() argument
276 return (void *)object + cache->kasan_info.alloc_meta_offset; in get_alloc_info()
279 struct kasan_free_meta *get_free_info(struct kmem_cache *cache, in get_free_info() argument
283 return (void *)object + cache->kasan_info.free_meta_offset; in get_free_info()
296 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) in kasan_unpoison_object_data() argument
298 kasan_unpoison_shadow(object, cache->object_size); in kasan_unpoison_object_data()
301 void kasan_poison_object_data(struct kmem_cache *cache, void *object) in kasan_poison_object_data() argument
304 round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), in kasan_poison_object_data()
310 * 1. A cache might have a constructor, which might save a pointer to a slab
314 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
322 static u8 assign_tag(struct kmem_cache *cache, const void *object, in assign_tag() argument
335 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU in assign_tag()
338 if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) in assign_tag()
344 return (u8)obj_to_index(cache, virt_to_page(object), (void *)object); in assign_tag()
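The comment fragments above explain why caches with a constructor or SLAB_TYPESAFE_BY_RCU cannot take a fresh random tag on every allocation: a pointer saved by the constructor, or dereferenced after free under RCU, must keep matching the object's tag across reallocations. Deriving the tag from the object's slot index keeps it stable. A hypothetical userspace illustration (object size, names, and setup are assumptions):

#include <stdint.h>
#include <stdio.h>

#define OBJ_SIZE 64u   /* assumed object size within the slab */

/*
 * Hypothetical version of the "tag from object index" fallback in
 * assign_tag(): objects at the same slab slot always get the same tag,
 * so a pointer saved by a constructor (or reused under RCU) keeps a
 * matching tag across reallocations.
 */
static uint8_t tag_for_slot(const char *slab_base, const char *object)
{
	size_t index = (size_t)(object - slab_base) / OBJ_SIZE;

	return (uint8_t)index;  /* truncation to 8 bits is fine: tags wrap */
}

int main(void)
{
	char slab[16 * OBJ_SIZE];

	/* The third object keeps tag 2 no matter how often it is reallocated. */
	printf("tag=%u\n", tag_for_slot(slab, slab + 2 * OBJ_SIZE));
	return 0;
}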
354 void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, in kasan_init_slab_obj() argument
359 if (!(cache->flags & SLAB_KASAN)) in kasan_init_slab_obj()
362 alloc_info = get_alloc_info(cache, object); in kasan_init_slab_obj()
367 assign_tag(cache, object, true, false)); in kasan_init_slab_obj()
387 static bool __kasan_slab_free(struct kmem_cache *cache, void *object, in __kasan_slab_free() argument
399 if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) != in __kasan_slab_free()
406 if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU)) in __kasan_slab_free()
415 rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE); in __kasan_slab_free()
419 unlikely(!(cache->flags & SLAB_KASAN))) in __kasan_slab_free()
422 kasan_set_free_info(cache, object, tag); in __kasan_slab_free()
424 quarantine_put(get_free_info(cache, object), cache); in __kasan_slab_free()
429 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) in kasan_slab_free() argument
431 return __kasan_slab_free(cache, object, ip, true); in kasan_slab_free()
434 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, in __kasan_kmalloc() argument
435 size_t size, gfp_t flags, bool keep_tag) in __kasan_kmalloc() argument
447 redzone_start = round_up((unsigned long)(object + size), in __kasan_kmalloc()
449 redzone_end = round_up((unsigned long)object + cache->object_size, in __kasan_kmalloc()
453 tag = assign_tag(cache, object, false, keep_tag); in __kasan_kmalloc()
456 kasan_unpoison_shadow(set_tag(object, tag), size); in __kasan_kmalloc()
457 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, in __kasan_kmalloc()
460 if (cache->flags & SLAB_KASAN) in __kasan_kmalloc()
461 kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags); in __kasan_kmalloc()
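__kasan_kmalloc() unpoisons only the bytes the caller asked for and poisons the rest of the slab object as a redzone, rounding both boundaries to the shadow granule. A small sketch of that rounding, assuming an 8-byte granule and made-up addresses:

#include <stdio.h>

#define GRANULE 8UL   /* assumed KASAN_SHADOW_SCALE_SIZE */

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long object = 0x1000;      /* pretend object address */
	unsigned long req_size = 13;        /* requested kmalloc size */
	unsigned long object_size = 64;     /* cache->object_size */

	/* Mirrors the redzone_start/redzone_end computation in __kasan_kmalloc(). */
	unsigned long redzone_start = round_up_to(object + req_size, GRANULE);
	unsigned long redzone_end   = round_up_to(object + object_size, GRANULE);

	printf("unpoison [%#lx, %#lx), poison redzone [%#lx, %#lx)\n",
	       object, object + req_size, redzone_start, redzone_end);
	/* unpoison the 13 requested bytes; poison 0x1010..0x1040 (the tail of
	 * the partial granule is handled by the shadow byte value, as in the
	 * unpoison sketch earlier) */
	return 0;
}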
466 void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, in kasan_slab_alloc() argument
469 return __kasan_kmalloc(cache, object, cache->object_size, flags, false); in kasan_slab_alloc()
472 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, in kasan_kmalloc() argument
473 size_t size, gfp_t flags) in kasan_kmalloc() argument
475 return __kasan_kmalloc(cache, object, size, flags, true); in kasan_kmalloc()
479 void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, in kasan_kmalloc_large() argument
493 redzone_start = round_up((unsigned long)(ptr + size), in kasan_kmalloc_large()
497 kasan_unpoison_shadow(ptr, size); in kasan_kmalloc_large()
498 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, in kasan_kmalloc_large()
504 void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags) in kasan_krealloc() argument
514 return kasan_kmalloc_large(object, size, flags); in kasan_krealloc()
516 return __kasan_kmalloc(page->slab_cache, object, size, in kasan_krealloc()
533 __kasan_slab_free(page->slab_cache, ptr, ip, false); in kasan_poison_kfree()
545 int kasan_module_alloc(void *addr, size_t size) in kasan_module_alloc() argument
553 scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT; in kasan_module_alloc()
557 return -EINVAL; in kasan_module_alloc()
567 find_vm_area(addr)->flags |= VM_KASAN; in kasan_module_alloc()
572 return -ENOMEM; in kasan_module_alloc()
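kasan_module_alloc() sizes the shadow for a module mapping by scaling the module size down by the shadow scale and rounding up to whole pages. A standalone sketch of the same computation (constants and the userspace setting are assumed):

#include <stdio.h>

#define PAGE_SIZE          4096UL
#define SHADOW_SCALE_SHIFT 3           /* assumed: 8 bytes per shadow byte */
#define SHADOW_MASK        ((1UL << SHADOW_SCALE_SHIFT) - 1)

int main(void)
{
	unsigned long module_size = 123456;  /* arbitrary example */

	/* Mirrors the scaled_size/shadow_size computation in kasan_module_alloc(). */
	unsigned long scaled = (module_size + SHADOW_MASK) >> SHADOW_SCALE_SHIFT;
	unsigned long shadow_size = (scaled + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	printf("%lu bytes of module memory need %lu bytes of shadow (%lu pages)\n",
	       module_size, shadow_size, shadow_size / PAGE_SIZE);
	/* 123456 -> 15432 shadow bytes, rounded up to 16384 (4 pages) */
	return 0;
}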
577 if (vm->flags & VM_KASAN) in kasan_free_shadow()
578 vfree(kasan_mem_to_shadow(vm->addr)); in kasan_free_shadow()
602 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse in shadow_mapped()
624 nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT; in kasan_mem_notifier()
625 start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn); in kasan_mem_notifier()
630 if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) || in kasan_mem_notifier()
649 pfn_to_nid(mem_data->start_pfn), in kasan_mem_notifier()
665 * Non-NULL result of the find_vm_area() will tell us if in kasan_mem_notifier()
704 return -ENOMEM; in kasan_populate_vmalloc_pte()
720 int kasan_populate_vmalloc(unsigned long addr, unsigned long size) in kasan_populate_vmalloc() argument
730 shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size); in kasan_populate_vmalloc()
734 shadow_end - shadow_start, in kasan_populate_vmalloc()
742 * We need to be careful about inter-cpu effects here. Consider: in kasan_populate_vmalloc()
752 * // let a = area->addr in kasan_populate_vmalloc()
769 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in in kasan_populate_vmalloc()
773 * releasing a page-table lock, which will provide the barrier. in kasan_populate_vmalloc()
783 void kasan_poison_vmalloc(const void *start, unsigned long size) in kasan_poison_vmalloc() argument
788 size = round_up(size, KASAN_SHADOW_SCALE_SIZE); in kasan_poison_vmalloc()
789 kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID); in kasan_poison_vmalloc()
792 void kasan_unpoison_vmalloc(const void *start, unsigned long size) in kasan_unpoison_vmalloc() argument
797 kasan_unpoison_shadow(start, size); in kasan_unpoison_vmalloc()
827 * -------------------
830 * That might not map onto the shadow in a way that is page-aligned:
835 * -------- -------- -------- -------- --------
837 * | | | /-------/ |
838 * \-------\|/------/ |/---------------/
849 * partially covered shadow pages - (1) and (3) in the example. For this,
857 * -------- -------- -------- -------- --------
859 * | | | /-------/ |
860 * \-------\|/------/ |/---------------/
867 * page (1) - we know no allocation currently uses anything in that page,
876 * -----------
887 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
891 * trouble - any simultaneous allocations will be for disjoint regions.
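The comment above (only partially captured by this match list) describes freeing just the shadow pages that are entirely covered by the released region: the region's shadow start is rounded up and its shadow end rounded down to page boundaries, and the partially covered edge pages are only freed after checking them against the containing free region. A minimal sketch of the fully-covered computation, assuming an 8-byte shadow scale, 4 KiB pages, and made-up addresses:

#include <stdio.h>

#define PAGE_SIZE          4096UL
#define SHADOW_SCALE_SHIFT 3           /* assumed: 8 bytes per shadow byte */

/* One shadow page covers this much vmalloc address space. */
#define SHADOW_PAGE_SPAN   (PAGE_SIZE << SHADOW_SCALE_SHIFT)

int main(void)
{
	/* Pretend a vmalloc region [start, end) is being released. */
	unsigned long start = 0x100000, end = 0x15a000;

	/*
	 * Mirrors the idea in kasan_release_vmalloc(): only shadow pages
	 * whose whole 32 KiB span lies inside [start, end) can be freed
	 * unconditionally; the partially covered pages at either edge need
	 * the free_region_start/end check described in the comment above.
	 */
	unsigned long full_start = (start + SHADOW_PAGE_SPAN - 1) & ~(SHADOW_PAGE_SPAN - 1);
	unsigned long full_end   = end & ~(SHADOW_PAGE_SPAN - 1);

	printf("fully covered shadow spans: [%#lx, %#lx)\n", full_start, full_end);
	/* prints [0x100000, 0x158000): the tail 0x158000..0x15a000 only partially
	 * covers its shadow page, so it is left for the edge check */
	return 0;
}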
899 unsigned long size; in kasan_release_vmalloc() local
909 region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE; in kasan_release_vmalloc()
922 size = shadow_end - shadow_start; in kasan_release_vmalloc()
925 size, kasan_depopulate_vmalloc_pte, in kasan_release_vmalloc()