Lines matching refs: objp

332 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)  in dbg_redzone1()  argument
335 return (unsigned long long *) (objp + obj_offset(cachep) - in dbg_redzone1()
339 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) in dbg_redzone2() argument
343 return (unsigned long long *)(objp + cachep->size - in dbg_redzone2()
346 return (unsigned long long *) (objp + cachep->size - in dbg_redzone2()
350 static void **dbg_userword(struct kmem_cache *cachep, void *objp) in dbg_userword() argument
353 return (void **)(objp + cachep->size - BYTES_PER_WORD); in dbg_userword()
359 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
360 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
361 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) argument
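The dbg_redzone1()/dbg_redzone2()/dbg_userword() accessors listed above locate the debug words that the allocator wraps around each object when debugging is compiled in: redzone 1 sits immediately before the payload (at obj_offset()), while redzone 2 and the last-user word are packed at the end of the per-object area; without DEBUG the macros simply BUG(). A minimal userspace model of that layout, with hypothetical struct and field names (only the RED_INACTIVE/POISON_FREE values mirror include/linux/poison.h):

#include <stdio.h>
#include <string.h>

#define RED_INACTIVE	0x09F911029D74E35BULL	/* redzone value while the object is free */
#define POISON_FREE	0x6b			/* fill pattern for a poisoned, free object */

/* Illustrative layout: [redzone1][payload][redzone2][last-user word].
 * The kernel computes these addresses from obj_offset() and cachep->size;
 * here a plain struct makes the offsets explicit instead. */
struct debug_obj {
	unsigned long long redzone1;	/* guards the start of the payload */
	unsigned char payload[64];	/* object_size bytes handed to the caller */
	unsigned long long redzone2;	/* guards the end of the payload */
	void *last_user;		/* caller that last freed the object */
};

int main(void)
{
	struct debug_obj o;

	o.redzone1 = o.redzone2 = RED_INACTIVE;		/* object starts out free */
	o.last_user = NULL;
	memset(o.payload, POISON_FREE, sizeof(o.payload));

	printf("free object: redzone1=%llx redzone2=%llx last_user=%p\n",
	       o.redzone1, o.redzone2, o.last_user);
	return 0;
}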
552 struct slab *slab, void *objp) in cache_free_pfmemalloc() argument
562 free_block(cachep, &objp, 1, slab_node, &list); in cache_free_pfmemalloc()
592 static __always_inline void __free_one(struct array_cache *ac, void *objp) in __free_one() argument
596 WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp)) in __free_one()
598 ac->entry[ac->avail++] = objp; in __free_one()
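__free_one() pushes a freed pointer onto the per-CPU array_cache and warns if that same pointer is already the most recently cached entry, which catches back-to-back double frees cheaply; ____cache_alloc() at line 2992 below pops from the same LIFO. A self-contained userspace sketch of that LIFO; the struct and function names are hypothetical, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of the per-CPU array_cache: a small LIFO of free
 * object pointers.  Names and sizes are illustrative only. */
struct array_cache_model {
	unsigned int avail;	/* number of cached free pointers */
	unsigned int limit;	/* capacity of entry[] */
	void *entry[8];
};

/* Free side, as in __free_one(): reject a back-to-back double free,
 * then push the pointer. */
static bool free_one(struct array_cache_model *ac, void *objp)
{
	if (ac->avail > 0 && ac->entry[ac->avail - 1] == objp) {
		fprintf(stderr, "double free of %p\n", objp);
		return false;
	}
	if (ac->avail == ac->limit)
		return false;	/* the kernel would flush a batch back to the slab lists here */
	ac->entry[ac->avail++] = objp;
	return true;
}

/* Allocation fast path, as in ____cache_alloc(): pop the most recently
 * freed (cache-hot) pointer. */
static void *alloc_one(struct array_cache_model *ac)
{
	if (ac->avail == 0)
		return NULL;	/* the kernel falls back to cache_alloc_refill() */
	return ac->entry[--ac->avail];
}

int main(void)
{
	struct array_cache_model ac = { .avail = 0, .limit = 8 };
	int obj;

	free_one(&ac, &obj);
	free_one(&ac, &obj);		/* triggers the double-free warning */
	printf("got back %p\n", alloc_one(&ac));
	return 0;
}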
616 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
747 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, in __cache_free_alien() argument
765 __free_one(ac, objp); in __cache_free_alien()
771 free_block(cachep, &objp, 1, slab_node, &list); in __cache_free_alien()
778 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
780 int slab_node = slab_nid(virt_to_slab(objp)); in cache_free_alien()
789 return __cache_free_alien(cachep, objp, node, slab_node); in cache_free_alien()
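cache_free_alien() handles the NUMA case: when the object belongs to the node it is being freed on, nothing special happens and the caller keeps the local fast path; otherwise __cache_free_alien() parks it in a per-node "alien" queue (or hands it straight to free_block() when no alien cache exists) so it eventually returns to its home node. A rough userspace model of just that dispatch decision; all names and queue sizes are illustrative:

#include <stdio.h>

/* Model of the decision in cache_free_alien(): local frees stay on this
 * CPU's cache, remote ones are queued for their home node. */
struct queues {
	int local_count;
	int alien_count[4];	/* one queue per remote node, illustrative */
};

static void cache_free(struct queues *q, int current_node, int object_node)
{
	if (object_node == current_node) {
		q->local_count++;		/* fast path: stays on this CPU */
		return;
	}
	q->alien_count[object_node]++;		/* remote: queue for its home node */
	printf("queued object for node %d\n", object_node);
}

int main(void)
{
	struct queues q = { 0 };

	cache_free(&q, 0, 0);	/* local free */
	cache_free(&q, 0, 2);	/* alien free, drained to node 2 later */
	return 0;
}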
1415 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) in slab_kernel_map() argument
1420 __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); in slab_kernel_map()
1424 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, in slab_kernel_map() argument
1470 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) in print_objinfo() argument
1477 *dbg_redzone1(cachep, objp), in print_objinfo()
1478 *dbg_redzone2(cachep, objp)); in print_objinfo()
1482 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp)); in print_objinfo()
1483 realobj = (char *)objp + obj_offset(cachep); in print_objinfo()
1494 static void check_poison_obj(struct kmem_cache *cachep, void *objp) in check_poison_obj() argument
1503 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1518 print_objinfo(cachep, objp, 0); in check_poison_obj()
1537 struct slab *slab = virt_to_slab(objp); in check_poison_obj()
1540 objnr = obj_to_index(cachep, slab, objp); in check_poison_obj()
1542 objp = index_to_obj(cachep, slab, objnr - 1); in check_poison_obj()
1543 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1545 print_objinfo(cachep, objp, 2); in check_poison_obj()
1548 objp = index_to_obj(cachep, slab, objnr + 1); in check_poison_obj()
1549 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1551 print_objinfo(cachep, objp, 2); in check_poison_obj()
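check_poison_obj() scans the payload for the first byte that no longer matches the free poison (POISON_FREE, with POISON_END as the final byte), prints the damaged area, and then dumps the previous and next objects in the slab so an overrun by a neighbour is visible. A hedged userspace sketch of the scan itself; the function name and buffer size are made up, the poison values mirror include/linux/poison.h:

#include <stdio.h>
#include <string.h>

#define POISON_FREE 0x6b	/* fill byte for a free, poisoned object */
#define POISON_END  0xa5	/* expected last byte of the poisoned area */

/* Return the offset of the first corrupted byte, or -1 if the buffer
 * still carries the free poison intact. */
static long check_poison(const unsigned char *obj, size_t size)
{
	for (size_t i = 0; i < size; i++) {
		unsigned char expect = (i == size - 1) ? POISON_END : POISON_FREE;

		if (obj[i] != expect)
			return (long)i;
	}
	return -1;
}

int main(void)
{
	unsigned char obj[32];

	memset(obj, POISON_FREE, sizeof(obj));
	obj[sizeof(obj) - 1] = POISON_END;

	obj[5] = 0x00;		/* simulate a use-after-free scribble */
	printf("first bad byte at offset %ld\n", check_poison(obj, sizeof(obj)));
	return 0;
}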
1569 void *objp = index_to_obj(cachep, slab, i); in slab_destroy_debugcheck() local
1572 check_poison_obj(cachep, objp); in slab_destroy_debugcheck()
1573 slab_kernel_map(cachep, objp, 1); in slab_destroy_debugcheck()
1576 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1578 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
2315 void *objp = index_to_obj(cachep, slab, i); in cache_init_objs_debug() local
2318 *dbg_userword(cachep, objp) = NULL; in cache_init_objs_debug()
2321 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2322 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2331 objp + obj_offset(cachep)); in cache_init_objs_debug()
2332 cachep->ctor(objp + obj_offset(cachep)); in cache_init_objs_debug()
2334 cachep, objp + obj_offset(cachep)); in cache_init_objs_debug()
2338 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2340 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2345 poison_obj(cachep, objp, POISON_FREE); in cache_init_objs_debug()
2346 slab_kernel_map(cachep, objp, 0); in cache_init_objs_debug()
2456 void *objp; in cache_init_objs() local
2470 objp = index_to_obj(cachep, slab, i); in cache_init_objs()
2471 objp = kasan_init_slab_obj(cachep, objp); in cache_init_objs()
2475 kasan_unpoison_object_data(cachep, objp); in cache_init_objs()
2476 cachep->ctor(objp); in cache_init_objs()
2477 kasan_poison_object_data(cachep, objp); in cache_init_objs()
2487 void *objp; in slab_get_obj() local
2489 objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active)); in slab_get_obj()
2492 return objp; in slab_get_obj()
2496 struct slab *slab, void *objp) in slab_put_obj() argument
2498 unsigned int objnr = obj_to_index(cachep, slab, objp); in slab_put_obj()
2506 cachep->name, objp); in slab_put_obj()
2513 slab->freelist = objp + obj_offset(cachep); in slab_put_obj()
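slab_get_obj() and slab_put_obj() manage the per-slab freelist as an array of object indices: an allocation consumes the index at slab->active, and a free pushes its own index back just below it, after the DEBUG branch has scanned the free region for that same index to catch double frees. A small userspace model of that index freelist; struct and function names are hypothetical:

#include <stdio.h>

#define NUM_OBJS 8

/* Userspace model of the per-slab index freelist: freelist[] holds object
 * indices; positions at or above 'active' name free objects, allocation
 * consumes freelist[active], and a free writes its index back just below
 * the old 'active'. */
struct slab_model {
	unsigned int active;
	unsigned int num;
	unsigned char freelist[NUM_OBJS];
};

static int slab_get(struct slab_model *s)
{
	if (s->active == s->num)
		return -1;			/* slab is full */
	return s->freelist[s->active++];	/* as in slab_get_obj() */
}

static void slab_put(struct slab_model *s, unsigned int objnr)
{
	/* Double-free check, as in the DEBUG branch of slab_put_obj(). */
	for (unsigned int i = s->active; i < s->num; i++)
		if (s->freelist[i] == objnr) {
			fprintf(stderr, "double free of object %u\n", objnr);
			return;
		}
	s->freelist[--s->active] = objnr;	/* push the index back */
}

int main(void)
{
	struct slab_model s = { .active = 0, .num = NUM_OBJS,
				.freelist = { 0, 1, 2, 3, 4, 5, 6, 7 } };
	int a = slab_get(&s), b = slab_get(&s);

	slab_put(&s, a);
	slab_put(&s, a);			/* reports a double free */
	printf("next allocation reuses object %d (b was %d)\n", slab_get(&s), b);
	return 0;
}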
2634 static void kfree_debugcheck(const void *objp) in kfree_debugcheck() argument
2636 if (!virt_addr_valid(objp)) { in kfree_debugcheck()
2638 (unsigned long)objp); in kfree_debugcheck()
2665 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, in cache_free_debugcheck() argument
2671 BUG_ON(virt_to_cache(objp) != cachep); in cache_free_debugcheck()
2673 objp -= obj_offset(cachep); in cache_free_debugcheck()
2674 kfree_debugcheck(objp); in cache_free_debugcheck()
2675 slab = virt_to_slab(objp); in cache_free_debugcheck()
2678 verify_redzone_free(cachep, objp); in cache_free_debugcheck()
2679 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2680 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2683 *dbg_userword(cachep, objp) = (void *)caller; in cache_free_debugcheck()
2685 objnr = obj_to_index(cachep, slab, objp); in cache_free_debugcheck()
2688 BUG_ON(objp != index_to_obj(cachep, slab, objnr)); in cache_free_debugcheck()
2691 poison_obj(cachep, objp, POISON_FREE); in cache_free_debugcheck()
2692 slab_kernel_map(cachep, objp, 0); in cache_free_debugcheck()
2694 return objp; in cache_free_debugcheck()
2699 #define cache_free_debugcheck(x, objp, z) (objp) argument
2707 void *objp; in fixup_objfreelist_debug() local
2710 objp = next - obj_offset(cachep); in fixup_objfreelist_debug()
2712 poison_obj(cachep, objp, POISON_FREE); in fixup_objfreelist_debug()
2729 void **objp = slab->freelist; in fixup_slab_list() local
2731 *objp = *list; in fixup_slab_list()
2732 *list = objp; in fixup_slab_list()
2944 gfp_t flags, void *objp, unsigned long caller) in cache_alloc_debugcheck_after() argument
2947 if (!objp || is_kfence_address(objp)) in cache_alloc_debugcheck_after()
2948 return objp; in cache_alloc_debugcheck_after()
2950 check_poison_obj(cachep, objp); in cache_alloc_debugcheck_after()
2951 slab_kernel_map(cachep, objp, 1); in cache_alloc_debugcheck_after()
2952 poison_obj(cachep, objp, POISON_INUSE); in cache_alloc_debugcheck_after()
2955 *dbg_userword(cachep, objp) = (void *)caller; in cache_alloc_debugcheck_after()
2958 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || in cache_alloc_debugcheck_after()
2959 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { in cache_alloc_debugcheck_after()
2962 objp, *dbg_redzone1(cachep, objp), in cache_alloc_debugcheck_after()
2963 *dbg_redzone2(cachep, objp)); in cache_alloc_debugcheck_after()
2965 *dbg_redzone1(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
2966 *dbg_redzone2(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
2969 objp += obj_offset(cachep); in cache_alloc_debugcheck_after()
2971 cachep->ctor(objp); in cache_alloc_debugcheck_after()
2972 if ((unsigned long)objp & (arch_slab_minalign() - 1)) { in cache_alloc_debugcheck_after()
2973 pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp, in cache_alloc_debugcheck_after()
2976 return objp; in cache_alloc_debugcheck_after()
2979 #define cache_alloc_debugcheck_after(a, b, objp, d) (objp) argument
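Taken together, cache_free_debugcheck() and cache_alloc_debugcheck_after() drive each object's redzones through a two-state lifecycle: RED_INACTIVE while the object is free, RED_ACTIVE while it is live, and the wrong value on either transition is reported as a double free or an overwrite beyond the object. A hedged userspace sketch of that state machine; only the RED_* values mirror include/linux/poison.h, everything else is illustrative:

#include <stdio.h>

#define RED_INACTIVE	0x09F911029D74E35BULL	/* object is free */
#define RED_ACTIVE	0xD84156C5635688C0ULL	/* object is live */

struct guarded {
	unsigned long long redzone1, redzone2;
};

/* Allocation side, as in cache_alloc_debugcheck_after(): both zones must
 * still read RED_INACTIVE, then flip to RED_ACTIVE. */
static int debug_alloc(struct guarded *g)
{
	if (g->redzone1 != RED_INACTIVE || g->redzone2 != RED_INACTIVE) {
		fprintf(stderr, "redzone corrupted while object was free\n");
		return -1;
	}
	g->redzone1 = g->redzone2 = RED_ACTIVE;
	return 0;
}

/* Free side, as in cache_free_debugcheck(): the zones must read
 * RED_ACTIVE, then go back to RED_INACTIVE. */
static int debug_free(struct guarded *g)
{
	if (g->redzone1 != RED_ACTIVE || g->redzone2 != RED_ACTIVE) {
		fprintf(stderr, "double free, or overwrite beyond the object\n");
		return -1;
	}
	g->redzone1 = g->redzone2 = RED_INACTIVE;
	return 0;
}

int main(void)
{
	struct guarded g = { RED_INACTIVE, RED_INACTIVE };

	debug_alloc(&g);
	debug_free(&g);
	debug_free(&g);		/* second free is caught by the state check */
	return 0;
}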
2984 void *objp; in ____cache_alloc() local
2992 objp = ac->entry[--ac->avail]; in ____cache_alloc()
2999 objp = cache_alloc_refill(cachep, flags); in ____cache_alloc()
3012 if (objp) in ____cache_alloc()
3014 return objp; in ____cache_alloc()
3167 void *objp = NULL; in __do_cache_alloc() local
3172 objp = alternate_node_alloc(cachep, flags); in __do_cache_alloc()
3173 if (objp) in __do_cache_alloc()
3182 objp = ____cache_alloc(cachep, flags); in __do_cache_alloc()
3185 objp = ____cache_alloc(cachep, flags); in __do_cache_alloc()
3188 objp = fallback_alloc(cachep, flags); in __do_cache_alloc()
3196 if (!objp) in __do_cache_alloc()
3197 objp = ____cache_alloc_node(cachep, flags, nodeid); in __do_cache_alloc()
3199 return objp; in __do_cache_alloc()
3216 void *objp; in slab_alloc_node() local
3225 objp = kfence_alloc(cachep, orig_size, flags); in slab_alloc_node()
3226 if (unlikely(objp)) in slab_alloc_node()
3230 objp = __do_cache_alloc(cachep, flags, nodeid); in slab_alloc_node()
3232 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); in slab_alloc_node()
3233 prefetchw(objp); in slab_alloc_node()
3237 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init, in slab_alloc_node()
3239 return objp; in slab_alloc_node()
3264 void *objp; in free_block() local
3267 objp = objpp[i]; in free_block()
3269 slab = virt_to_slab(objp); in free_block()
3272 slab_put_obj(cachep, slab, objp); in free_block()
3348 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, in __cache_free() argument
3353 memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1); in __cache_free()
3355 if (is_kfence_address(objp)) { in __cache_free()
3356 kmemleak_free_recursive(objp, cachep->flags); in __cache_free()
3357 __kfence_free(objp); in __cache_free()
3368 memset(objp, 0, cachep->object_size); in __cache_free()
3370 if (kasan_slab_free(cachep, objp, init)) in __cache_free()
3375 __kcsan_check_access(objp, cachep->object_size, in __cache_free()
3378 ___cache_free(cachep, objp, caller); in __cache_free()
3381 void ___cache_free(struct kmem_cache *cachep, void *objp, in ___cache_free() argument
3387 kmemleak_free_recursive(objp, cachep->flags); in ___cache_free()
3388 objp = cache_free_debugcheck(cachep, objp, caller); in ___cache_free()
3397 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) in ___cache_free()
3408 struct slab *slab = virt_to_slab(objp); in ___cache_free()
3411 cache_free_pfmemalloc(cachep, slab, objp); in ___cache_free()
3416 __free_one(ac, objp); in ___cache_free()
3466 void *objp = kfence_alloc(s, s->object_size, flags) ?: in kmem_cache_alloc_bulk() local
3469 if (unlikely(!objp)) in kmem_cache_alloc_bulk()
3471 p[i] = objp; in kmem_cache_alloc_bulk()
3530 void *objp; in __kmem_obj_info() local
3536 objp = object - obj_offset(cachep); in __kmem_obj_info()
3538 slab = virt_to_slab(objp); in __kmem_obj_info()
3539 objnr = obj_to_index(cachep, slab, objp); in __kmem_obj_info()
3540 objp = index_to_obj(cachep, slab, objnr); in __kmem_obj_info()
3541 kpp->kp_objp = objp; in __kmem_obj_info()
3543 kpp->kp_ret = *dbg_userword(cachep, objp); in __kmem_obj_info()
3548 void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp, in __do_kmem_cache_free() argument
3554 debug_check_no_locks_freed(objp, cachep->object_size); in __do_kmem_cache_free()
3556 debug_check_no_obj_freed(objp, cachep->object_size); in __do_kmem_cache_free()
3557 __cache_free(cachep, objp, caller); in __do_kmem_cache_free()
3561 void __kmem_cache_free(struct kmem_cache *cachep, void *objp, in __kmem_cache_free() argument
3564 __do_kmem_cache_free(cachep, objp, caller); in __kmem_cache_free()
3575 void kmem_cache_free(struct kmem_cache *cachep, void *objp) in kmem_cache_free() argument
3577 cachep = cache_from_obj(cachep, objp); in kmem_cache_free()
3581 trace_kmem_cache_free(_RET_IP_, objp, cachep); in kmem_cache_free()
3582 __do_kmem_cache_free(cachep, objp, _RET_IP_); in kmem_cache_free()
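kmem_cache_free() above is the exported free half of the per-cache API. A minimal module-style sketch of the create/alloc/free/destroy cycle it belongs to; struct foo, foo_cache and the module boilerplate are hypothetical, while the kmem_cache_* calls are the real exported interface:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical client: one dedicated cache for 'struct foo', one
 * allocation, one free.  Error handling is trimmed to the essentials. */
struct foo {
	int id;
	char name[16];
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	struct foo *f;

	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;

	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
	if (f) {
		f->id = 1;
		kmem_cache_free(foo_cache, f);	/* must go back to the same cache */
	}
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cache);	/* every object must already be freed */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

A dedicated cache per object type keeps same-sized objects packed together, which is also what lets the per-object debug layout above (redzones, poisoning, last-user word) be applied uniformly.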
3592 void *objp = p[i]; in kmem_cache_free_bulk() local
3596 struct folio *folio = virt_to_folio(objp); in kmem_cache_free_bulk()
3601 free_large_kmalloc(folio, objp); in kmem_cache_free_bulk()
3607 s = cache_from_obj(orig_s, objp); in kmem_cache_free_bulk()
3613 debug_check_no_locks_freed(objp, s->object_size); in kmem_cache_free_bulk()
3615 debug_check_no_obj_freed(objp, s->object_size); in kmem_cache_free_bulk()
3617 __cache_free(s, objp, _RET_IP_); in kmem_cache_free_bulk()
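kmem_cache_free_bulk() closes out the listing; together with kmem_cache_alloc_bulk() at line 3466 it lets a caller amortize the per-object hook and locking overhead across a whole batch. A hedged usage sketch: use_bulk() and NR_OBJS are illustrative, while the two bulk calls and their signatures are the exported API.

#include <linux/errno.h>
#include <linux/slab.h>

#define NR_OBJS 16

/* Hypothetical caller: grab up to NR_OBJS objects in one call, use them,
 * then hand the whole batch back.  kmem_cache_alloc_bulk() returns the
 * number of objects it actually provided (0 when the batch fails). */
static int use_bulk(struct kmem_cache *cache)
{
	void *p[NR_OBJS];
	int nr;

	nr = kmem_cache_alloc_bulk(cache, GFP_KERNEL, NR_OBJS, p);
	if (!nr)
		return -ENOMEM;

	/* ... work with p[0..nr-1] ... */

	kmem_cache_free_bulk(cache, nr, p);
	return 0;
}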