Lines Matching refs:meta
References to the identifier "meta" in the KFENCE core (mm/kfence/core.c). Each entry gives the source line number, the line itself, the enclosing function, and, for declarations, whether "meta" is a function argument or a local variable.

243 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)  in metadata_to_pageaddr()  argument
245 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; in metadata_to_pageaddr()
251 if (KFENCE_WARN_ON(meta < kfence_metadata || in metadata_to_pageaddr()
252 meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS)) in metadata_to_pageaddr()
259 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr)) in metadata_to_pageaddr()
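The arithmetic on line 245 encodes the pool layout: object data pages alternate with protected guard pages, so the i-th metadata entry maps to the page at offset (i + 1) * 2 * PAGE_SIZE from the start of the pool. A minimal user-space sketch of that mapping follows; the pool base address, object count, and 4 KiB page size are stand-ins for the real __kfence_pool, CONFIG_KFENCE_NUM_OBJECTS, and PAGE_SIZE, not kernel values.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define NUM_OBJECTS 255                 /* stand-in for CONFIG_KFENCE_NUM_OBJECTS */

struct kfence_metadata { unsigned long addr; };

static struct kfence_metadata kfence_metadata[NUM_OBJECTS];
static const unsigned long pool_base = 0x100000000UL;  /* hypothetical __kfence_pool start */

/* Object i occupies the (2*i + 2)-th page of the pool: data pages alternate
 * with guard pages, and the first two pool pages are guard pages as well. */
static unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;

        return pool_base + offset;
}

int main(void)
{
        for (size_t i = 0; i < 3; i++)
                printf("object %zu -> data page at 0x%lx\n",
                       i, metadata_to_pageaddr(&kfence_metadata[i]));
        return 0;
}

The checks on lines 251-259 then only need to verify that the metadata pointer lies within the array and that the object's recorded address falls inside the page this mapping yields.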
270 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next, in metadata_update_state() argument
274 next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track; in metadata_update_state()
276 lockdep_assert_held(&meta->lock); in metadata_update_state()
298 WRITE_ONCE(meta->state, next); in metadata_update_state()
311 struct kfence_metadata *meta; in check_canary_byte() local
319 meta = addr_to_metadata((unsigned long)addr); in check_canary_byte()
320 raw_spin_lock_irqsave(&meta->lock, flags); in check_canary_byte()
321 kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION); in check_canary_byte()
322 raw_spin_unlock_irqrestore(&meta->lock, flags); in check_canary_byte()
328 static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *)) in for_each_canary() argument
330 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); in for_each_canary()
343 for (addr = pageaddr; addr < meta->addr; addr++) { in for_each_canary()
349 for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) { in for_each_canary()
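for_each_canary() (lines 328-349) visits every canary byte in the object's data page: the bytes between the start of the page and meta->addr, and the bytes between the end of the object and the end of the page. Each side stops as soon as the callback reports a bad byte, but the right side is still walked when the left side fails, so a report can show corruption on both sides. Below is a self-contained sketch of the same iteration; set_canary_byte() uses a simplified pattern (the real one also mixes in the address's low bits), and the page comes from aligned_alloc() rather than the KFENCE pool.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))

struct kfence_metadata { unsigned long addr; size_t size; };

/* Walk the canary bytes on both sides of the object within its data page. */
static void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(uint8_t *))
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr;

        /* Canary bytes to the left of the object. */
        for (addr = pageaddr; addr < meta->addr; addr++) {
                if (!fn((uint8_t *)addr))
                        break;
        }
        /* Canary bytes to the right, up to the end of the page; walked even
         * if the left side already reported a corrupted byte. */
        for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
                if (!fn((uint8_t *)addr))
                        break;
        }
}

static bool set_canary_byte(uint8_t *addr)
{
        *addr = 0xaa;   /* simplified canary pattern */
        return true;
}

int main(void)
{
        uint8_t *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        struct kfence_metadata meta = { .addr = (unsigned long)page + 1024, .size = 100 };

        if (!page)
                return 1;
        for_each_canary(&meta, set_canary_byte);        /* pattern the redzones */
        free(page);
        return 0;
}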
359 struct kfence_metadata *meta = NULL; in kfence_guarded_alloc() local
370 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list); in kfence_guarded_alloc()
371 list_del_init(&meta->list); in kfence_guarded_alloc()
374 if (!meta) { in kfence_guarded_alloc()
379 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { in kfence_guarded_alloc()
391 list_add_tail(&meta->list, &kfence_freelist); in kfence_guarded_alloc()
397 meta->addr = metadata_to_pageaddr(meta); in kfence_guarded_alloc()
399 if (meta->state == KFENCE_OBJECT_FREED) in kfence_guarded_alloc()
400 kfence_unprotect(meta->addr); in kfence_guarded_alloc()
412 meta->addr += PAGE_SIZE - size; in kfence_guarded_alloc()
413 meta->addr = ALIGN_DOWN(meta->addr, cache->align); in kfence_guarded_alloc()
416 addr = (void *)meta->addr; in kfence_guarded_alloc()
419 metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries); in kfence_guarded_alloc()
421 WRITE_ONCE(meta->cache, cache); in kfence_guarded_alloc()
422 meta->size = size; in kfence_guarded_alloc()
423 meta->alloc_stack_hash = alloc_stack_hash; in kfence_guarded_alloc()
424 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_alloc()
429 slab = virt_to_slab((void *)meta->addr); in kfence_guarded_alloc()
438 for_each_canary(meta, set_canary_byte); in kfence_guarded_alloc()
451 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ in kfence_guarded_alloc()
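Inside kfence_guarded_alloc(), lines 412-413 implement the randomized placement: for roughly half of the allocations, chosen at random, the object is moved to the far end of the data page so that an overflow past the object immediately hits the guard page above, and the address is then aligned down to the cache's alignment. A sketch of just that placement step; place_object() and its right_of_page flag are illustrative names, with align standing in for cache->align.

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define ALIGN_DOWN(x, a) ((x) & ~((unsigned long)(a) - 1))

/* Left-aligned objects start at the page boundary, so underflows hit the
 * guard page below; right-aligned objects end at (or near) the page boundary,
 * so overflows hit the guard page above. */
static unsigned long place_object(unsigned long pageaddr, size_t size,
                                  size_t align, bool right_of_page)
{
        unsigned long addr = pageaddr;

        if (right_of_page) {
                addr += PAGE_SIZE - size;
                addr = ALIGN_DOWN(addr, align);
        }
        return addr;
}

For example, place_object(pageaddr, 64, 8, true) returns pageaddr + 4032, so the object ends exactly at the page boundary and a single-byte overflow lands on the protected guard page.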
459 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) in kfence_guarded_free() argument
465 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_guarded_free()
467 if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { in kfence_guarded_free()
470 kfence_report_error((unsigned long)addr, false, NULL, meta, in kfence_guarded_free()
472 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_free()
485 if (meta->unprotected_page) { in kfence_guarded_free()
486 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); in kfence_guarded_free()
487 kfence_protect(meta->unprotected_page); in kfence_guarded_free()
488 meta->unprotected_page = 0; in kfence_guarded_free()
492 metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); in kfence_guarded_free()
493 init = slab_want_init_on_free(meta->cache); in kfence_guarded_free()
494 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_guarded_free()
496 alloc_covered_add(meta->alloc_stack_hash, -1); in kfence_guarded_free()
499 for_each_canary(meta, check_canary_byte); in kfence_guarded_free()
507 memzero_explicit(addr, meta->size); in kfence_guarded_free()
516 KFENCE_WARN_ON(!list_empty(&meta->list)); in kfence_guarded_free()
517 list_add_tail(&meta->list, &kfence_freelist); in kfence_guarded_free()
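kfence_guarded_free() only proceeds if the object is currently allocated and the freed pointer matches the recorded object start (line 467); otherwise the free is reported as invalid and nothing is modified. It then marks the object freed, checks the canary bytes, zeroes the object when the cache wants init-on-free, protects the page again so later use-after-free accesses fault, and returns the metadata to the freelist (lines 516-517). A sketch of the initial validity check; free_is_valid() is an illustrative name, not a kernel helper.

#include <stdbool.h>

enum kfence_object_state { KFENCE_OBJECT_UNUSED, KFENCE_OBJECT_ALLOCATED, KFENCE_OBJECT_FREED };

struct kfence_metadata {
        enum kfence_object_state state;
        unsigned long addr;     /* start of the object within its data page */
};

/* A free is accepted only for a currently allocated object and only when the
 * freed pointer is exactly the recorded object start; anything else (double
 * free, pointer into the middle of the object, unused slot) is reported as an
 * invalid free before any state is modified. */
static bool free_is_valid(const struct kfence_metadata *meta, const void *addr)
{
        return meta->state == KFENCE_OBJECT_ALLOCATED &&
               meta->addr == (unsigned long)addr;
}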
530 struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head); in rcu_guarded_free() local
532 kfence_guarded_free((void *)meta->addr, meta, false); in rcu_guarded_free()
590 struct kfence_metadata *meta = &kfence_metadata[i]; in kfence_init_pool() local
593 INIT_LIST_HEAD(&meta->list); in kfence_init_pool()
594 raw_spin_lock_init(&meta->lock); in kfence_init_pool()
595 meta->state = KFENCE_OBJECT_UNUSED; in kfence_init_pool()
596 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ in kfence_init_pool()
597 list_add_tail(&meta->list, &kfence_freelist); in kfence_init_pool()
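kfence_init_pool() (lines 590-597) walks the metadata array once: each entry gets an initialized list node and lock, starts out KFENCE_OBJECT_UNUSED, records the address of its data page so metadata_to_pageaddr() can validate it later, and is appended to the global freelist. A simplified sketch, using an array-backed freelist in place of the kernel's intrusive list_head list; NUM_OBJECTS, pool_base, and init_pool() are illustrative stand-ins.

#include <stddef.h>

#define PAGE_SIZE   4096UL
#define NUM_OBJECTS 255                 /* stand-in for CONFIG_KFENCE_NUM_OBJECTS */

enum kfence_object_state { KFENCE_OBJECT_UNUSED, KFENCE_OBJECT_ALLOCATED, KFENCE_OBJECT_FREED };

struct kfence_metadata {
        enum kfence_object_state state;
        unsigned long addr;
};

static struct kfence_metadata kfence_metadata[NUM_OBJECTS];
static struct kfence_metadata *kfence_freelist[NUM_OBJECTS];    /* simplified: array, not list_head */
static size_t freelist_len;

/* Every slot starts out UNUSED and on the freelist; addr is filled in up
 * front so it can be validated later. The first data page sits two pages into
 * the pool, and consecutive objects are two pages apart (one data page plus
 * one guard page). */
static void init_pool(unsigned long pool_base)
{
        unsigned long addr = pool_base + 2 * PAGE_SIZE;

        for (size_t i = 0; i < NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
                kfence_metadata[i].state = KFENCE_OBJECT_UNUSED;
                kfence_metadata[i].addr = addr;
                kfence_freelist[freelist_len++] = &kfence_metadata[i];
        }
}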
711 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; in show_object() local
714 raw_spin_lock_irqsave(&meta->lock, flags); in show_object()
715 kfence_print_object(seq, meta); in show_object()
716 raw_spin_unlock_irqrestore(&meta->lock, flags); in show_object()
748 struct kfence_metadata *meta = &kfence_metadata[i]; in kfence_check_all_canary() local
750 if (meta->state == KFENCE_OBJECT_ALLOCATED) in kfence_check_all_canary()
751 for_each_canary(meta, check_canary_byte); in kfence_check_all_canary()
913 struct kfence_metadata *meta; in kfence_shutdown_cache() local
919 meta = &kfence_metadata[i]; in kfence_shutdown_cache()
928 if (READ_ONCE(meta->cache) != s || in kfence_shutdown_cache()
929 READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED) in kfence_shutdown_cache()
932 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_shutdown_cache()
933 in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED; in kfence_shutdown_cache()
934 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_shutdown_cache()
951 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true); in kfence_shutdown_cache()
956 meta = &kfence_metadata[i]; in kfence_shutdown_cache()
959 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED) in kfence_shutdown_cache()
962 raw_spin_lock_irqsave(&meta->lock, flags); in kfence_shutdown_cache()
963 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED) in kfence_shutdown_cache()
964 meta->cache = NULL; in kfence_shutdown_cache()
965 raw_spin_unlock_irqrestore(&meta->lock, flags); in kfence_shutdown_cache()
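kfence_shutdown_cache() (lines 913-965) scans the metadata twice: a first pass force-frees objects of the dying cache that are still allocated, and a second pass detaches the cache pointer from objects that were already freed. Both passes use the same pattern, a cheap lockless READ_ONCE() filter (lines 928-929 and 959) followed by a re-check under meta->lock before acting. A user-space sketch of that pattern, with C11 relaxed atomics standing in for READ_ONCE() and a pthread mutex for the raw spinlock; the names are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

enum obj_state { OBJ_UNUSED, OBJ_ALLOCATED, OBJ_FREED };

struct meta {
        pthread_mutex_t lock;                   /* stand-in for the raw spinlock */
        _Atomic(const void *) cache;
        _Atomic(enum obj_state) state;
};

/* Most objects belong to other caches or are not allocated, so a lockless
 * pre-check skips them without taking the lock; the few survivors are
 * re-validated under the lock, since both fields can change between the two
 * reads. */
static bool object_still_in_use(struct meta *meta, const void *dying_cache)
{
        bool in_use;

        if (atomic_load_explicit(&meta->cache, memory_order_relaxed) != dying_cache ||
            atomic_load_explicit(&meta->state, memory_order_relaxed) != OBJ_ALLOCATED)
                return false;

        pthread_mutex_lock(&meta->lock);
        in_use = atomic_load_explicit(&meta->cache, memory_order_relaxed) == dying_cache &&
                 atomic_load_explicit(&meta->state, memory_order_relaxed) == OBJ_ALLOCATED;
        pthread_mutex_unlock(&meta->lock);

        return in_use;
}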
1044 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); in kfence_ksize() local
1050 return meta ? meta->size : 0; in kfence_ksize()
1055 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); in kfence_object_start() local
1061 return meta ? (void *)meta->addr : NULL; in kfence_object_start()
1066 struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); in __kfence_free() local
1069 KFENCE_WARN_ON(meta->objcg); in __kfence_free()
1077 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) in __kfence_free()
1078 call_rcu(&meta->rcu_head, rcu_guarded_free); in __kfence_free()
1080 kfence_guarded_free(addr, meta, false); in __kfence_free()
1100 struct kfence_metadata *meta; in kfence_handle_page_fault() local
1103 meta = addr_to_metadata(addr - PAGE_SIZE); in kfence_handle_page_fault()
1104 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { in kfence_handle_page_fault()
1105 to_report = meta; in kfence_handle_page_fault()
1107 distance = addr - data_race(meta->addr + meta->size); in kfence_handle_page_fault()
1110 meta = addr_to_metadata(addr + PAGE_SIZE); in kfence_handle_page_fault()
1111 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { in kfence_handle_page_fault()
1113 if (!to_report || distance > data_race(meta->addr) - addr) in kfence_handle_page_fault()
1114 to_report = meta; in kfence_handle_page_fault()
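When a fault hits a guard page, kfence_handle_page_fault() (lines 1100-1114) looks at the pages immediately below and above the faulting address; if both contain allocated objects, the access is attributed to whichever object lies closer, on the assumption that an out-of-bounds access usually comes from the nearest neighbour. The neighbours' addr and size are read with data_race() since the distance only needs to be approximate. A sketch of the selection; pick_out_of_bounds_candidate() is an illustrative name.

#include <stddef.h>

struct kfence_metadata { unsigned long addr; size_t size; };

/* "left" is the allocated object in the page below the faulting address (or
 * NULL), "right" the allocated object in the page above (or NULL). The fault
 * is attributed to whichever object's end or start lies closest to it. */
static const struct kfence_metadata *
pick_out_of_bounds_candidate(unsigned long fault,
                             const struct kfence_metadata *left,
                             const struct kfence_metadata *right)
{
        const struct kfence_metadata *to_report = NULL;
        unsigned long distance = 0;

        if (left) {
                to_report = left;
                distance = fault - (left->addr + left->size);
        }
        if (right && (!to_report || distance > right->addr - fault))
                to_report = right;

        return to_report;
}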