Lines matching "i", "cache", "size"
// SPDX-License-Identifier: GPL-2.0
/*
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov
 */
/* in kasan_set_track(): */
        track->pid = current->pid;
        track->stack = kasan_save_stack(flags);
/* in kasan_enable_current(): */
        current->kasan_depth++;

/* in kasan_disable_current(): */
        current->kasan_depth--;
void __kasan_unpoison_range(const void *address, size_t size)
{
        kasan_unpoison(address, size, false);
}
/* in kasan_unpoison_task_stack_below(): */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison(base, watermark - base, false);
/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
/* in __kasan_unpoison_pages(): */
        unsigned long i;

        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
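/*
 * Illustration: the loop above stamps every one of the (1 << order) pages of
 * the block with the same tag, so an order-2 allocation, for example, tags
 * all four of its pages identically before the range is unpoisoned.
 */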
/* in optimal_redzone(): */
        return
                object_size <= 64 - 16 ? 16 :
                object_size <= 128 - 32 ? 32 :
                object_size <= 512 - 64 ? 64 :
                object_size <= 4096 - 128 ? 128 :
                object_size <= (1 << 14) - 256 ? 256 :
                object_size <= (1 << 15) - 512 ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
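/*
 * Worked example (for illustration): a 32-byte object satisfies
 * "object_size <= 64 - 16" and gets a 16-byte redzone; a 100-byte object
 * falls through to "object_size <= 512 - 64" and gets 64 bytes; a 1000-byte
 * object lands in the "<= 4096 - 128" bucket and gets 128 bytes.
 */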
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                          slab_flags_t *flags)
{
        /* ... 1. In slab_ksize() when calculating the size of the accessible ... */

        ok_size = *size;

        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        if (*size > KMALLOC_MAX_SIZE) {
                cache->kasan_info.alloc_meta_offset = 0;
                *size = ok_size;
        }

        /* Non-generic modes don't use free meta; they set this and return early: */
        cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;

        /* Otherwise cache->kasan_info.free_meta_offset = 0 is implied. */
        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                ok_size = *size;

                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);

                if (*size > KMALLOC_MAX_SIZE) {
                        cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
                        *size = ok_size;
                }
        }

        /* Calculate size with optimal redzone. */
        optimal_size = cache->object_size + optimal_redzone(cache->object_size);

        /* Use optimal size if the size with added metas is not large enough. */
        if (*size < optimal_size)
                *size = optimal_size;
}
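/*
 * Illustration (not from the matched lines): when both metas are added, the
 * per-object layout set up above is roughly
 *
 *   | object (object_size) | alloc meta | free meta | redzone |
 *                          ^alloc_meta_offset
 *                                       ^free_meta_offset
 *
 * with *size growing accordingly and falling back to its previous value
 * whenever adding a meta would push it past KMALLOC_MAX_SIZE.
 */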
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
        cache->kasan_info.is_kmalloc = true;
}
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
                                              const void *object)
{
        if (!cache->kasan_info.alloc_meta_offset)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
                                            const void *object)
{
        if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
                return NULL;
        return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
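/*
 * Note: both accessors apply the offsets chosen by __kasan_cache_create() to
 * the untagged address (kasan_reset_tag()), so a tag-carrying pointer resolves
 * to the same metadata as its untagged counterpart.
 */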
/* in __kasan_poison_slab(): */
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison(object, cache->object_size, false);
}
void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                     KASAN_KMALLOC_REDZONE, false);
}
/*
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere; the assigned tag must then be preserved.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed; their tag must be preserved too.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
                            const void *object, bool init)
{
        /* If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU ... */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : kasan_random_tag();

        /* Otherwise derive a stable tag from the object's index in its slab. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
}
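/*
 * Illustration: an ordinary cache (no constructor, not SLAB_TYPESAFE_BY_RCU)
 * gets a fresh random tag on every allocation, while caches whose objects may
 * legitimately be reached after free (RCU) or hold pointers set up by a
 * constructor keep a stable, index-derived tag so those accesses still match
 * the memory's tag.
 */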
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
                                          const void *object)
{
        alloc_meta = kasan_get_alloc_meta(cache, object); /* zeroed when present */

        object = set_tag(object, assign_tag(cache, object, true));

        return (void *)object;
}
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
                                       unsigned long ip, bool quarantine, bool init)
{
        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
                     object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period. */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                     KASAN_KMALLOC_FREE, init);

        kasan_set_free_info(cache, object, tag);

        return kasan_quarantine_put(cache, object);
}
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                       unsigned long ip, bool init)
{
        return ____kasan_slab_free(cache, object, ip, true, init);
}
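/*
 * Note: the value returned here comes from kasan_quarantine_put() above; when
 * it is true, KASAN has kept the object (generic-mode quarantine) and the slab
 * allocator is expected to skip the actual free for now.
 */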
/* in __kasan_slab_free_mempool(): */
        /* ... !PageSlab() when the size provided to kmalloc is larger than ... */
        ____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
static void set_alloc_info(struct kmem_cache *cache, void *object,
                           gfp_t flags, bool is_kmalloc)
{
        /* Don't save alloc info for kmalloc caches in kmem_cache_alloc(). */
        if (cache->kasan_info.is_kmalloc && !is_kmalloc)
                return;

        alloc_meta = kasan_get_alloc_meta(cache, object);
        if (alloc_meta)
                kasan_set_track(&alloc_meta->alloc_track, flags);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
                                       void *object, gfp_t flags, bool init)
{
        /* Generate and assign random tag for tag-based modes. */
        tag = assign_tag(cache, object, false);
        tagged_object = set_tag(object, tag);

        /* Unpoison the whole object. */
        kasan_unpoison(tagged_object, cache->object_size, init);

        /* Save alloc info (if possible) for non-kmalloc() allocations. */
        set_alloc_info(cache, (void *)object, flags, false);

        return tagged_object;
}
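/*
 * Summary of the allocation path above: a tag is chosen and applied to the
 * returned pointer, the whole object_size range is unpoisoned, and the alloc
 * stack is recorded. For kmalloc() the accessible region is then narrowed to
 * the requested size by ____kasan_kmalloc() below.
 */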
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
                                      const void *object, size_t size, gfp_t flags)
{
        /*
         * The redzone has byte-level precision for the generic mode:
         * partially poison the last granule to cover the unaligned part.
         */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule((void *)object, size);

        /* Poison the aligned part of the redzone. */
        redzone_start = round_up((unsigned long)(object + size),
                                 KASAN_GRANULE_SIZE);
        redzone_end = round_up((unsigned long)(object + cache->object_size),
                               KASAN_GRANULE_SIZE);
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_KMALLOC_REDZONE, false);

        /* Save alloc info (if possible) for kmalloc() allocations. */
        set_alloc_info(cache, (void *)object, flags, true);

        return (void *)object;
}
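/*
 * Worked example (for illustration, assuming the generic mode's 8-byte
 * granules and a 128-byte cache): kmalloc(100) first unpoisons all 128 bytes
 * in __kasan_slab_alloc(). Here, kasan_poison_last_granule() marks the
 * granule at offsets 96..103 as only 4 bytes valid (100 % 8), then
 * redzone_start = round_up(object + 100, 8) = object + 104 and
 * redzone_end = round_up(object + 128, 8) = object + 128, so offsets
 * 104..127 are poisoned as redzone.
 */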
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                    size_t size, gfp_t flags)
{
        return ____kasan_kmalloc(cache, object, size, flags);
}
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
                                          gfp_t flags)
{
        /* The redzone has byte-level precision for the generic mode. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule(ptr, size);

        /* Poison from the end of the buffer to the end of the last page. */
        redzone_start = round_up((unsigned long)(ptr + size),
                                 KASAN_GRANULE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
        kasan_poison((void *)redzone_start, redzone_end - redzone_start,
                     KASAN_PAGE_REDZONE, false);

        return (void *)ptr;
}
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        /* Unpoison the object's data up to the new size. */
        kasan_unpoison(object, size, false);

        /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
        if (unlikely(!PageSlab(page)))
                return __kasan_kmalloc_large(object, size, flags);
        else
                return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}
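/*
 * Orientation sketch (assumed call sites, not part of this excerpt): the slab
 * allocator reaches these hooks through the kasan_*() wrappers roughly as
 *
 *   object = kasan_slab_alloc(cache, object, flags, init);  // kmem_cache_alloc()
 *   object = kasan_kmalloc(cache, object, size, flags);     // kmalloc()
 *   if (kasan_slab_free(cache, object, init))               // kfree() path
 *           return;  // delayed: the object sits in quarantine
 *
 * with __kasan_krealloc() re-running the kmalloc-style poisoning after
 * krealloc() resizes an allocation.
 */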