Lines matching refs: cachep (all references to the struct kmem_cache * operand in the SLAB allocator, mm/slab.c)
211 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
213 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
214 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
217 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
219 static inline void fixup_slab_list(struct kmem_cache *cachep,
241 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
247 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ argument
249 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
250 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
251 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
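For reference, MAKE_LIST is a small do/while(0) wrapper around list_splice(); reconstructed from the fragment at 244 (the INIT_LIST_HEAD() line does not itself reference cachep, so it is absent from the match):

    #define MAKE_LIST(cachep, listp, slab, nodeid)                   \
        do {                                                         \
            INIT_LIST_HEAD(listp);                                   \
            list_splice(&get_node(cachep, nodeid)->slab, listp);     \
        } while (0)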
327 static int obj_offset(struct kmem_cache *cachep) in obj_offset() argument
329 return cachep->obj_offset; in obj_offset()
332 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) in dbg_redzone1() argument
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone1()
335 return (unsigned long long*) (objp + obj_offset(cachep) - in dbg_redzone1()
339 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) in dbg_redzone2() argument
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone2()
342 if (cachep->flags & SLAB_STORE_USER) in dbg_redzone2()
343 return (unsigned long long *)(objp + cachep->size - in dbg_redzone2()
346 return (unsigned long long *) (objp + cachep->size - in dbg_redzone2()
350 static void **dbg_userword(struct kmem_cache *cachep, void *objp) in dbg_userword() argument
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); in dbg_userword()
353 return (void **)(objp + cachep->size - BYTES_PER_WORD); in dbg_userword()
359 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
360 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
361 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) argument
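Taken together, the accessors above imply the following per-object debug layout (a sketch, assuming a cache with both SLAB_RED_ZONE and SLAB_STORE_USER set):

    /*
     * |<------------------------- cachep->size ------------------------>|
     * | red zone 1 | payload (starts at obj_offset) | red zone 2 | user |
     *
     * dbg_redzone1(): the word immediately before the payload;
     * dbg_redzone2(): near the end of the object, shifted back by
     *                 REDZONE_ALIGN when a user word is also stored;
     * dbg_userword(): the final word, recording the last caller.
     */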
367 static inline bool is_store_user_clean(struct kmem_cache *cachep) in is_store_user_clean() argument
369 return atomic_read(&cachep->store_user_clean) == 1; in is_store_user_clean()
372 static inline void set_store_user_clean(struct kmem_cache *cachep) in set_store_user_clean() argument
374 atomic_set(&cachep->store_user_clean, 1); in set_store_user_clean()
377 static inline void set_store_user_dirty(struct kmem_cache *cachep) in set_store_user_dirty() argument
379 if (is_store_user_clean(cachep)) in set_store_user_dirty()
380 atomic_set(&cachep->store_user_clean, 0); in set_store_user_dirty()
384 static inline void set_store_user_dirty(struct kmem_cache *cachep) {} in set_store_user_dirty() argument
434 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) in cpu_cache_get() argument
436 return this_cpu_ptr(cachep->cpu_cache); in cpu_cache_get()
478 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) argument
480 static void __slab_error(const char *function, struct kmem_cache *cachep, in __slab_error() argument
484 function, cachep->name, msg); in __slab_error()
593 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, in cache_free_pfmemalloc() argument
601 n = get_node(cachep, page_node); in cache_free_pfmemalloc()
604 free_block(cachep, &objp, 1, page_node, &list); in cache_free_pfmemalloc()
607 slabs_destroy(cachep, &list); in cache_free_pfmemalloc()
635 #define drain_alien_cache(cachep, alien) do { } while (0) argument
636 #define reap_alien(cachep, n) do { } while (0) argument
648 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
653 static inline void *alternate_node_alloc(struct kmem_cache *cachep, in alternate_node_alloc() argument
659 static inline void *____cache_alloc_node(struct kmem_cache *cachep, in ____cache_alloc_node() argument
724 static void __drain_alien_cache(struct kmem_cache *cachep, in __drain_alien_cache() argument
728 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache()
740 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
749 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) in reap_alien() argument
762 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
764 slabs_destroy(cachep, &list); in reap_alien()
770 static void drain_alien_cache(struct kmem_cache *cachep, in drain_alien_cache() argument
785 __drain_alien_cache(cachep, ac, i, &list); in drain_alien_cache()
787 slabs_destroy(cachep, &list); in drain_alien_cache()
792 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, in __cache_free_alien() argument
800 n = get_node(cachep, node); in __cache_free_alien()
801 STATS_INC_NODEFREES(cachep); in __cache_free_alien()
807 STATS_INC_ACOVERFLOW(cachep); in __cache_free_alien()
808 __drain_alien_cache(cachep, ac, page_node, &list); in __cache_free_alien()
812 slabs_destroy(cachep, &list); in __cache_free_alien()
814 n = get_node(cachep, page_node); in __cache_free_alien()
816 free_block(cachep, &objp, 1, page_node, &list); in __cache_free_alien()
818 slabs_destroy(cachep, &list); in __cache_free_alien()
823 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
834 return __cache_free_alien(cachep, objp, node, page_node); in cache_free_alien()
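The wrapper at 823 is the gatekeeper for the NUMA slow path; a reconstruction consistent with the fragments above (lines 824-833 are elided from the match):

    static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
    {
        int page_node = page_to_nid(virt_to_page(objp));
        int node = numa_mem_id();

        /* Fast path: the object's page belongs to this node. */
        if (likely(node == page_node))
            return 0;

        /* Remote object: queue it in the alien cache for page_node. */
        return __cache_free_alien(cachep, objp, node, page_node);
    }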
847 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) in init_cache_node() argument
856 n = get_node(cachep, node); in init_cache_node()
859 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + in init_cache_node()
860 cachep->num; in init_cache_node()
872 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in init_cache_node()
875 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; in init_cache_node()
882 cachep->node[node] = n; in init_cache_node()
900 struct kmem_cache *cachep; in init_cache_node_node() local
902 list_for_each_entry(cachep, &slab_caches, list) { in init_cache_node_node()
903 ret = init_cache_node(cachep, node, GFP_KERNEL); in init_cache_node_node()
912 static int setup_kmem_cache_node(struct kmem_cache *cachep, in setup_kmem_cache_node() argument
923 new_alien = alloc_alien_cache(node, cachep->limit, gfp); in setup_kmem_cache_node()
928 if (cachep->shared) { in setup_kmem_cache_node()
930 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); in setup_kmem_cache_node()
935 ret = init_cache_node(cachep, node, gfp); in setup_kmem_cache_node()
939 n = get_node(cachep, node); in setup_kmem_cache_node()
942 free_block(cachep, n->shared->entry, in setup_kmem_cache_node()
959 slabs_destroy(cachep, &list); in setup_kmem_cache_node()
982 struct kmem_cache *cachep; in cpuup_canceled() local
987 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
993 n = get_node(cachep, node); in cpuup_canceled()
1000 n->free_limit -= cachep->batchcount; in cpuup_canceled()
1003 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled()
1005 free_block(cachep, nc->entry, nc->avail, node, &list); in cpuup_canceled()
1016 free_block(cachep, shared->entry, in cpuup_canceled()
1028 drain_alien_cache(cachep, alien); in cpuup_canceled()
1033 slabs_destroy(cachep, &list); in cpuup_canceled()
1040 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
1041 n = get_node(cachep, node); in cpuup_canceled()
1044 drain_freelist(cachep, n, INT_MAX); in cpuup_canceled()
1050 struct kmem_cache *cachep; in cpuup_prepare() local
1068 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_prepare()
1069 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); in cpuup_prepare()
1139 struct kmem_cache *cachep; in drain_cache_node_node() local
1142 list_for_each_entry(cachep, &slab_caches, list) { in drain_cache_node_node()
1145 n = get_node(cachep, node); in drain_cache_node_node()
1149 drain_freelist(cachep, n, INT_MAX); in drain_cache_node_node()
1196 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, in init_list() argument
1210 MAKE_ALL_LISTS(cachep, ptr, nodeid); in init_list()
1211 cachep->node[nodeid] = ptr; in init_list()
1218 static void __init set_up_node(struct kmem_cache *cachep, int index) in set_up_node() argument
1223 cachep->node[node] = &init_kmem_cache_node[index + node]; in set_up_node()
1224 cachep->node[node]->next_reap = jiffies + in set_up_node()
1226 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in set_up_node()
1317 struct kmem_cache *cachep; in kmem_cache_init_late() local
1321 list_for_each_entry(cachep, &slab_caches, list) in kmem_cache_init_late()
1322 if (enable_cpucache(cachep, GFP_NOWAIT)) in kmem_cache_init_late()
1359 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) in slab_out_of_memory() argument
1374 cachep->name, cachep->size, cachep->gfporder); in slab_out_of_memory()
1376 for_each_kmem_cache_node(cachep, node, n) { in slab_out_of_memory()
1387 (total_slabs * cachep->num) - free_objs, in slab_out_of_memory()
1388 total_slabs * cachep->num); in slab_out_of_memory()
1401 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages() argument
1407 flags |= cachep->allocflags; in kmem_getpages()
1409 page = __alloc_pages_node(nodeid, flags, cachep->gfporder); in kmem_getpages()
1411 slab_out_of_memory(cachep, flags, nodeid); in kmem_getpages()
1415 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) { in kmem_getpages()
1416 __free_pages(page, cachep->gfporder); in kmem_getpages()
1420 nr_pages = (1 << cachep->gfporder); in kmem_getpages()
1421 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_getpages()
1437 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) in kmem_freepages() argument
1439 int order = cachep->gfporder; in kmem_freepages()
1442 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_freepages()
1455 memcg_uncharge_slab(page, order, cachep); in kmem_freepages()
1461 struct kmem_cache *cachep; in kmem_rcu_free() local
1465 cachep = page->slab_cache; in kmem_rcu_free()
1467 kmem_freepages(cachep, page); in kmem_rcu_free()
1471 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) in is_debug_pagealloc_cache() argument
1473 if (debug_pagealloc_enabled() && OFF_SLAB(cachep) && in is_debug_pagealloc_cache()
1474 (cachep->size % PAGE_SIZE) == 0) in is_debug_pagealloc_cache()
1481 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, in store_stackinfo() argument
1484 int size = cachep->object_size; in store_stackinfo()
1486 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; in store_stackinfo()
1513 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, in slab_kernel_map() argument
1516 if (!is_debug_pagealloc_cache(cachep)) in slab_kernel_map()
1520 store_stackinfo(cachep, objp, caller); in slab_kernel_map()
1522 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); in slab_kernel_map()
1526 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, in slab_kernel_map() argument
1531 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) in poison_obj() argument
1533 int size = cachep->object_size; in poison_obj()
1534 addr = &((char *)addr)[obj_offset(cachep)]; in poison_obj()
1572 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) in print_objinfo() argument
1577 if (cachep->flags & SLAB_RED_ZONE) { in print_objinfo()
1579 *dbg_redzone1(cachep, objp), in print_objinfo()
1580 *dbg_redzone2(cachep, objp)); in print_objinfo()
1583 if (cachep->flags & SLAB_STORE_USER) in print_objinfo()
1584 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp)); in print_objinfo()
1585 realobj = (char *)objp + obj_offset(cachep); in print_objinfo()
1586 size = cachep->object_size; in print_objinfo()
1596 static void check_poison_obj(struct kmem_cache *cachep, void *objp) in check_poison_obj() argument
1602 if (is_debug_pagealloc_cache(cachep)) in check_poison_obj()
1605 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1606 size = cachep->object_size; in check_poison_obj()
1618 print_tainted(), cachep->name, in check_poison_obj()
1620 print_objinfo(cachep, objp, 0); in check_poison_obj()
1642 objnr = obj_to_index(cachep, page, objp); in check_poison_obj()
1644 objp = index_to_obj(cachep, page, objnr - 1); in check_poison_obj()
1645 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1647 print_objinfo(cachep, objp, 2); in check_poison_obj()
1649 if (objnr + 1 < cachep->num) { in check_poison_obj()
1650 objp = index_to_obj(cachep, page, objnr + 1); in check_poison_obj()
1651 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1653 print_objinfo(cachep, objp, 2); in check_poison_obj()
1660 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1665 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1666 poison_obj(cachep, page->freelist - obj_offset(cachep), in slab_destroy_debugcheck()
1670 for (i = 0; i < cachep->num; i++) { in slab_destroy_debugcheck()
1671 void *objp = index_to_obj(cachep, page, i); in slab_destroy_debugcheck()
1673 if (cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1674 check_poison_obj(cachep, objp); in slab_destroy_debugcheck()
1675 slab_kernel_map(cachep, objp, 1, 0); in slab_destroy_debugcheck()
1677 if (cachep->flags & SLAB_RED_ZONE) { in slab_destroy_debugcheck()
1678 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1679 slab_error(cachep, "start of a freed object was overwritten"); in slab_destroy_debugcheck()
1680 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1681 slab_error(cachep, "end of a freed object was overwritten"); in slab_destroy_debugcheck()
1686 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1701 static void slab_destroy(struct kmem_cache *cachep, struct page *page) in slab_destroy() argument
1706 slab_destroy_debugcheck(cachep, page); in slab_destroy()
1707 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) in slab_destroy()
1710 kmem_freepages(cachep, page); in slab_destroy()
1716 if (OFF_SLAB(cachep)) in slab_destroy()
1717 kmem_cache_free(cachep->freelist_cache, freelist); in slab_destroy()
1720 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) in slabs_destroy() argument
1726 slab_destroy(cachep, page); in slabs_destroy()
1742 static size_t calculate_slab_order(struct kmem_cache *cachep, in calculate_slab_order() argument
1777 if (freelist_cache->size > cachep->size / 2) in calculate_slab_order()
1782 cachep->num = num; in calculate_slab_order()
1783 cachep->gfporder = gfporder; in calculate_slab_order()
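A worked example of what calculate_slab_order() leaves in cachep->num and cachep->gfporder: with 4 KiB pages, 256-byte objects, and a one-byte on-slab freelist index per object (freelist_idx_t is a single byte while objects-per-slab stays below 256), order 0 yields

    num  = 4096 / (256 + 1) = 15 objects
    left = 4096 - 15 * 257  = 241 bytes

and the leftover bytes become colouring room (cachep->colour = left / cachep->colour_off, as at 1914, 1948 and 1964 below).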
1811 struct kmem_cache *cachep, int entries, int batchcount) in alloc_kmem_cache_cpus() argument
1831 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) in setup_cpu_cache() argument
1834 return enable_cpucache(cachep, gfp); in setup_cpu_cache()
1836 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); in setup_cpu_cache()
1837 if (!cachep->cpu_cache) in setup_cpu_cache()
1845 set_up_node(cachep, SIZE_NODE); in setup_cpu_cache()
1850 cachep->node[node] = kmalloc_node( in setup_cpu_cache()
1852 BUG_ON(!cachep->node[node]); in setup_cpu_cache()
1853 kmem_cache_node_init(cachep->node[node]); in setup_cpu_cache()
1857 cachep->node[numa_mem_id()]->next_reap = in setup_cpu_cache()
1859 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in setup_cpu_cache()
1861 cpu_cache_get(cachep)->avail = 0; in setup_cpu_cache()
1862 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1863 cpu_cache_get(cachep)->batchcount = 1; in setup_cpu_cache()
1864 cpu_cache_get(cachep)->touched = 0; in setup_cpu_cache()
1865 cachep->batchcount = 1; in setup_cpu_cache()
1866 cachep->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1881 struct kmem_cache *cachep; in __kmem_cache_alias() local
1883 cachep = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
1884 if (cachep) { in __kmem_cache_alias()
1885 cachep->refcount++; in __kmem_cache_alias()
1891 cachep->object_size = max_t(int, cachep->object_size, size); in __kmem_cache_alias()
1893 return cachep; in __kmem_cache_alias()
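__kmem_cache_alias() is why two compatible kmem_cache_create() calls may hand back the same cache. A hypothetical illustration (cache names made up):

    struct kmem_cache *a = kmem_cache_create("cache_a", 128, 0, 0, NULL);
    struct kmem_cache *b = kmem_cache_create("cache_b", 128, 0, 0, NULL);
    /*
     * With slab merging in effect, a == b is possible: find_mergeable()
     * matched size/align/flags/ctor, so the existing cache's refcount was
     * bumped and its object_size widened to the larger request (1891).
     */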
1896 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, in set_objfreelist_slab_cache() argument
1901 cachep->num = 0; in set_objfreelist_slab_cache()
1903 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) in set_objfreelist_slab_cache()
1906 left = calculate_slab_order(cachep, size, in set_objfreelist_slab_cache()
1908 if (!cachep->num) in set_objfreelist_slab_cache()
1911 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) in set_objfreelist_slab_cache()
1914 cachep->colour = left / cachep->colour_off; in set_objfreelist_slab_cache()
1919 static bool set_off_slab_cache(struct kmem_cache *cachep, in set_off_slab_cache() argument
1924 cachep->num = 0; in set_off_slab_cache()
1937 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB); in set_off_slab_cache()
1938 if (!cachep->num) in set_off_slab_cache()
1945 if (left >= cachep->num * sizeof(freelist_idx_t)) in set_off_slab_cache()
1948 cachep->colour = left / cachep->colour_off; in set_off_slab_cache()
1953 static bool set_on_slab_cache(struct kmem_cache *cachep, in set_on_slab_cache() argument
1958 cachep->num = 0; in set_on_slab_cache()
1960 left = calculate_slab_order(cachep, size, flags); in set_on_slab_cache()
1961 if (!cachep->num) in set_on_slab_cache()
1964 cachep->colour = left / cachep->colour_off; in set_on_slab_cache()
1990 int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) in __kmem_cache_create() argument
1995 unsigned int size = cachep->size; in __kmem_cache_create()
2028 if (ralign < cachep->align) { in __kmem_cache_create()
2029 ralign = cachep->align; in __kmem_cache_create()
2037 cachep->align = ralign; in __kmem_cache_create()
2038 cachep->colour_off = cache_line_size(); in __kmem_cache_create()
2040 if (cachep->colour_off < cachep->align) in __kmem_cache_create()
2041 cachep->colour_off = cachep->align; in __kmem_cache_create()
2056 cachep->obj_offset += sizeof(unsigned long long); in __kmem_cache_create()
2071 kasan_cache_create(cachep, &size, &flags); in __kmem_cache_create()
2073 size = ALIGN(size, cachep->align); in __kmem_cache_create()
2079 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); in __kmem_cache_create()
2090 size >= 256 && cachep->object_size > cache_line_size()) { in __kmem_cache_create()
2094 if (set_off_slab_cache(cachep, tmp_size, flags)) { in __kmem_cache_create()
2096 cachep->obj_offset += tmp_size - size; in __kmem_cache_create()
2104 if (set_objfreelist_slab_cache(cachep, size, flags)) { in __kmem_cache_create()
2109 if (set_off_slab_cache(cachep, size, flags)) { in __kmem_cache_create()
2114 if (set_on_slab_cache(cachep, size, flags)) in __kmem_cache_create()
2120 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); in __kmem_cache_create()
2121 cachep->flags = flags; in __kmem_cache_create()
2122 cachep->allocflags = __GFP_COMP; in __kmem_cache_create()
2124 cachep->allocflags |= GFP_DMA; in __kmem_cache_create()
2126 cachep->allocflags |= __GFP_RECLAIMABLE; in __kmem_cache_create()
2127 cachep->size = size; in __kmem_cache_create()
2128 cachep->reciprocal_buffer_size = reciprocal_value(size); in __kmem_cache_create()
2137 (cachep->flags & SLAB_POISON) && in __kmem_cache_create()
2138 is_debug_pagealloc_cache(cachep)) in __kmem_cache_create()
2139 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); in __kmem_cache_create()
2142 if (OFF_SLAB(cachep)) { in __kmem_cache_create()
2143 cachep->freelist_cache = in __kmem_cache_create()
2144 kmalloc_slab(cachep->freelist_size, 0u); in __kmem_cache_create()
2147 err = setup_cpu_cache(cachep, gfp); in __kmem_cache_create()
2149 __kmem_cache_release(cachep); in __kmem_cache_create()
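For orientation, the public lifecycle that __kmem_cache_create() sits underneath; a minimal sketch with a hypothetical object type and names:

    #include <linux/module.h>
    #include <linux/slab.h>

    struct foo {
        int id;
        char payload[120];
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_init(void)
    {
        /* size, align and flags end up in __kmem_cache_create() above */
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, SLAB_HWCACHE_ALIGN, NULL);
        return foo_cache ? 0 : -ENOMEM;
    }

    static void __exit foo_exit(void)
    {
        kmem_cache_destroy(foo_cache);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");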
2172 static void check_spinlock_acquired(struct kmem_cache *cachep) in check_spinlock_acquired() argument
2176 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); in check_spinlock_acquired()
2180 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) in check_spinlock_acquired_node() argument
2184 assert_spin_locked(&get_node(cachep, node)->list_lock); in check_spinlock_acquired_node()
2196 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, in drain_array_locked() argument
2208 free_block(cachep, ac->entry, tofree, node, list); in drain_array_locked()
2215 struct kmem_cache *cachep = arg; in do_drain() local
2222 ac = cpu_cache_get(cachep); in do_drain()
2223 n = get_node(cachep, node); in do_drain()
2225 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2227 slabs_destroy(cachep, &list); in do_drain()
2231 static void drain_cpu_caches(struct kmem_cache *cachep) in drain_cpu_caches() argument
2237 on_each_cpu(do_drain, cachep, 1); in drain_cpu_caches()
2239 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2241 drain_alien_cache(cachep, n->alien); in drain_cpu_caches()
2243 for_each_kmem_cache_node(cachep, node, n) { in drain_cpu_caches()
2245 drain_array_locked(cachep, n->shared, node, true, &list); in drain_cpu_caches()
2248 slabs_destroy(cachep, &list); in drain_cpu_caches()
2304 int __kmem_cache_shrink(struct kmem_cache *cachep) in __kmem_cache_shrink() argument
2310 drain_cpu_caches(cachep); in __kmem_cache_shrink()
2313 for_each_kmem_cache_node(cachep, node, n) { in __kmem_cache_shrink()
2314 drain_freelist(cachep, n, INT_MAX); in __kmem_cache_shrink()
2323 void __kmemcg_cache_deactivate(struct kmem_cache *cachep) in __kmemcg_cache_deactivate() argument
2325 __kmem_cache_shrink(cachep); in __kmemcg_cache_deactivate()
2329 int __kmem_cache_shutdown(struct kmem_cache *cachep) in __kmem_cache_shutdown() argument
2331 return __kmem_cache_shrink(cachep); in __kmem_cache_shutdown()
2334 void __kmem_cache_release(struct kmem_cache *cachep) in __kmem_cache_release() argument
2339 cache_random_seq_destroy(cachep); in __kmem_cache_release()
2341 free_percpu(cachep->cpu_cache); in __kmem_cache_release()
2344 for_each_kmem_cache_node(cachep, i, n) { in __kmem_cache_release()
2348 cachep->node[i] = NULL; in __kmem_cache_release()
2366 static void *alloc_slabmgmt(struct kmem_cache *cachep, in alloc_slabmgmt() argument
2376 if (OBJFREELIST_SLAB(cachep)) in alloc_slabmgmt()
2378 else if (OFF_SLAB(cachep)) { in alloc_slabmgmt()
2380 freelist = kmem_cache_alloc_node(cachep->freelist_cache, in alloc_slabmgmt()
2386 freelist = addr + (PAGE_SIZE << cachep->gfporder) - in alloc_slabmgmt()
2387 cachep->freelist_size; in alloc_slabmgmt()
2404 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) in cache_init_objs_debug() argument
2409 for (i = 0; i < cachep->num; i++) { in cache_init_objs_debug()
2410 void *objp = index_to_obj(cachep, page, i); in cache_init_objs_debug()
2412 if (cachep->flags & SLAB_STORE_USER) in cache_init_objs_debug()
2413 *dbg_userword(cachep, objp) = NULL; in cache_init_objs_debug()
2415 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2416 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2417 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2424 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { in cache_init_objs_debug()
2425 kasan_unpoison_object_data(cachep, in cache_init_objs_debug()
2426 objp + obj_offset(cachep)); in cache_init_objs_debug()
2427 cachep->ctor(objp + obj_offset(cachep)); in cache_init_objs_debug()
2429 cachep, objp + obj_offset(cachep)); in cache_init_objs_debug()
2432 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2433 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2434 slab_error(cachep, "constructor overwrote the end of an object"); in cache_init_objs_debug()
2435 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2436 slab_error(cachep, "constructor overwrote the start of an object"); in cache_init_objs_debug()
2439 if (cachep->flags & SLAB_POISON) { in cache_init_objs_debug()
2440 poison_obj(cachep, objp, POISON_FREE); in cache_init_objs_debug()
2441 slab_kernel_map(cachep, objp, 0, 0); in cache_init_objs_debug()
2463 struct kmem_cache *cachep, in freelist_state_initialize() argument
2473 if (!cachep->random_seq) { in freelist_state_initialize()
2477 state->list = cachep->random_seq; in freelist_state_initialize()
2504 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page) in shuffle_freelist() argument
2506 unsigned int objfreelist = 0, i, rand, count = cachep->num; in shuffle_freelist()
2513 precomputed = freelist_state_initialize(&state, cachep, count); in shuffle_freelist()
2516 if (OBJFREELIST_SLAB(cachep)) { in shuffle_freelist()
2521 page->freelist = index_to_obj(cachep, page, objfreelist) + in shuffle_freelist()
2522 obj_offset(cachep); in shuffle_freelist()
2545 if (OBJFREELIST_SLAB(cachep)) in shuffle_freelist()
2546 set_free_obj(page, cachep->num - 1, objfreelist); in shuffle_freelist()
2551 static inline bool shuffle_freelist(struct kmem_cache *cachep, in shuffle_freelist() argument
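shuffle_freelist() (CONFIG_SLAB_FREELIST_RANDOM) randomizes the order in which a fresh slab hands out its objects, making heap layouts harder to predict. A minimal sketch of the underlying Fisher-Yates idea, not the kernel's exact code (rnd is a stand-in for its random source):

    typedef unsigned char freelist_idx_t;   /* one byte while num < 256 */

    static void shuffle(freelist_idx_t *list, unsigned int n,
                        unsigned int (*rnd)(unsigned int ceil))
    {
        unsigned int i, j;
        freelist_idx_t tmp;

        for (i = n - 1; i > 0; i--) {
            j = rnd(i + 1);             /* uniform index in [0, i] */
            tmp = list[i];
            list[i] = list[j];
            list[j] = tmp;
        }
    }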
2558 static void cache_init_objs(struct kmem_cache *cachep, in cache_init_objs() argument
2565 cache_init_objs_debug(cachep, page); in cache_init_objs()
2568 shuffled = shuffle_freelist(cachep, page); in cache_init_objs()
2570 if (!shuffled && OBJFREELIST_SLAB(cachep)) { in cache_init_objs()
2571 page->freelist = index_to_obj(cachep, page, cachep->num - 1) + in cache_init_objs()
2572 obj_offset(cachep); in cache_init_objs()
2575 for (i = 0; i < cachep->num; i++) { in cache_init_objs()
2576 objp = index_to_obj(cachep, page, i); in cache_init_objs()
2577 kasan_init_slab_obj(cachep, objp); in cache_init_objs()
2580 if (DEBUG == 0 && cachep->ctor) { in cache_init_objs()
2581 kasan_unpoison_object_data(cachep, objp); in cache_init_objs()
2582 cachep->ctor(objp); in cache_init_objs()
2583 kasan_poison_object_data(cachep, objp); in cache_init_objs()
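Note the contract visible in cache_init_objs(): a constructor runs once per object when the backing page is populated, not on every allocation. A hypothetical ctor that respects this:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct foo {
        spinlock_t lock;
        int state;
    };

    static void foo_ctor(void *obj)
    {
        struct foo *f = obj;

        /* Only establish invariants that survive free/alloc cycles. */
        spin_lock_init(&f->lock);
    }

    /* foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
     *                               0, 0, foo_ctor); */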
2591 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page) in slab_get_obj() argument
2595 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); in slab_get_obj()
2599 if (cachep->flags & SLAB_STORE_USER) in slab_get_obj()
2600 set_store_user_dirty(cachep); in slab_get_obj()
2606 static void slab_put_obj(struct kmem_cache *cachep, in slab_put_obj() argument
2609 unsigned int objnr = obj_to_index(cachep, page, objp); in slab_put_obj()
2614 for (i = page->active; i < cachep->num; i++) { in slab_put_obj()
2617 cachep->name, objp); in slab_put_obj()
2624 page->freelist = objp + obj_offset(cachep); in slab_put_obj()
2645 static struct page *cache_grow_begin(struct kmem_cache *cachep, in cache_grow_begin() argument
2666 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); in cache_grow_begin()
2677 page = kmem_getpages(cachep, local_flags, nodeid); in cache_grow_begin()
2682 n = get_node(cachep, page_node); in cache_grow_begin()
2686 if (n->colour_next >= cachep->colour) in cache_grow_begin()
2690 if (offset >= cachep->colour) in cache_grow_begin()
2693 offset *= cachep->colour_off; in cache_grow_begin()
2696 freelist = alloc_slabmgmt(cachep, page, offset, in cache_grow_begin()
2698 if (OFF_SLAB(cachep) && !freelist) in cache_grow_begin()
2701 slab_map_pages(cachep, page, freelist); in cache_grow_begin()
2704 cache_init_objs(cachep, page); in cache_grow_begin()
2712 kmem_freepages(cachep, page); in cache_grow_begin()
2719 static void cache_grow_end(struct kmem_cache *cachep, struct page *page) in cache_grow_end() argument
2730 n = get_node(cachep, page_to_nid(page)); in cache_grow_end()
2738 fixup_slab_list(cachep, n, page, &list); in cache_grow_end()
2740 STATS_INC_GROWN(cachep); in cache_grow_end()
2741 n->free_objects += cachep->num - page->active; in cache_grow_end()
2744 fixup_objfreelist_debug(cachep, &list); in cache_grow_end()
2785 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, in cache_free_debugcheck() argument
2791 BUG_ON(virt_to_cache(objp) != cachep); in cache_free_debugcheck()
2793 objp -= obj_offset(cachep); in cache_free_debugcheck()
2797 if (cachep->flags & SLAB_RED_ZONE) { in cache_free_debugcheck()
2798 verify_redzone_free(cachep, objp); in cache_free_debugcheck()
2799 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2800 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2802 if (cachep->flags & SLAB_STORE_USER) { in cache_free_debugcheck()
2803 set_store_user_dirty(cachep); in cache_free_debugcheck()
2804 *dbg_userword(cachep, objp) = (void *)caller; in cache_free_debugcheck()
2807 objnr = obj_to_index(cachep, page, objp); in cache_free_debugcheck()
2809 BUG_ON(objnr >= cachep->num); in cache_free_debugcheck()
2810 BUG_ON(objp != index_to_obj(cachep, page, objnr)); in cache_free_debugcheck()
2812 if (cachep->flags & SLAB_POISON) { in cache_free_debugcheck()
2813 poison_obj(cachep, objp, POISON_FREE); in cache_free_debugcheck()
2814 slab_kernel_map(cachep, objp, 0, caller); in cache_free_debugcheck()
2824 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, in fixup_objfreelist_debug() argument
2832 objp = next - obj_offset(cachep); in fixup_objfreelist_debug()
2834 poison_obj(cachep, objp, POISON_FREE); in fixup_objfreelist_debug()
2839 static inline void fixup_slab_list(struct kmem_cache *cachep, in fixup_slab_list() argument
2845 if (page->active == cachep->num) { in fixup_slab_list()
2847 if (OBJFREELIST_SLAB(cachep)) { in fixup_slab_list()
2850 if (cachep->flags & SLAB_POISON) { in fixup_slab_list()
2926 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, in cache_alloc_pfmemalloc() argument
2943 obj = slab_get_obj(cachep, page); in cache_alloc_pfmemalloc()
2946 fixup_slab_list(cachep, n, page, &list); in cache_alloc_pfmemalloc()
2949 fixup_objfreelist_debug(cachep, &list); in cache_alloc_pfmemalloc()
2958 static __always_inline int alloc_block(struct kmem_cache *cachep, in alloc_block() argument
2965 BUG_ON(page->active >= cachep->num); in alloc_block()
2967 while (page->active < cachep->num && batchcount--) { in alloc_block()
2968 STATS_INC_ALLOCED(cachep); in alloc_block()
2969 STATS_INC_ACTIVE(cachep); in alloc_block()
2970 STATS_SET_HIGH(cachep); in alloc_block()
2972 ac->entry[ac->avail++] = slab_get_obj(cachep, page); in alloc_block()
2978 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) in cache_alloc_refill() argument
2990 ac = cpu_cache_get(cachep); in cache_alloc_refill()
3000 n = get_node(cachep, node); in cache_alloc_refill()
3022 check_spinlock_acquired(cachep); in cache_alloc_refill()
3024 batchcount = alloc_block(cachep, ac, page, batchcount); in cache_alloc_refill()
3025 fixup_slab_list(cachep, n, page, &list); in cache_alloc_refill()
3032 fixup_objfreelist_debug(cachep, &list); in cache_alloc_refill()
3038 void *obj = cache_alloc_pfmemalloc(cachep, n, flags); in cache_alloc_refill()
3044 page = cache_grow_begin(cachep, gfp_exact_node(flags), node); in cache_alloc_refill()
3050 ac = cpu_cache_get(cachep); in cache_alloc_refill()
3052 alloc_block(cachep, ac, page, batchcount); in cache_alloc_refill()
3053 cache_grow_end(cachep, page); in cache_alloc_refill()
3063 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, in cache_alloc_debugcheck_before() argument
3070 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, in cache_alloc_debugcheck_after() argument
3073 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); in cache_alloc_debugcheck_after()
3076 if (cachep->flags & SLAB_POISON) { in cache_alloc_debugcheck_after()
3077 check_poison_obj(cachep, objp); in cache_alloc_debugcheck_after()
3078 slab_kernel_map(cachep, objp, 1, 0); in cache_alloc_debugcheck_after()
3079 poison_obj(cachep, objp, POISON_INUSE); in cache_alloc_debugcheck_after()
3081 if (cachep->flags & SLAB_STORE_USER) in cache_alloc_debugcheck_after()
3082 *dbg_userword(cachep, objp) = (void *)caller; in cache_alloc_debugcheck_after()
3084 if (cachep->flags & SLAB_RED_ZONE) { in cache_alloc_debugcheck_after()
3085 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || in cache_alloc_debugcheck_after()
3086 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { in cache_alloc_debugcheck_after()
3087 slab_error(cachep, "double free, or memory outside object was overwritten"); in cache_alloc_debugcheck_after()
3089 objp, *dbg_redzone1(cachep, objp), in cache_alloc_debugcheck_after()
3090 *dbg_redzone2(cachep, objp)); in cache_alloc_debugcheck_after()
3092 *dbg_redzone1(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
3093 *dbg_redzone2(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
3096 objp += obj_offset(cachep); in cache_alloc_debugcheck_after()
3097 if (cachep->ctor && cachep->flags & SLAB_POISON) in cache_alloc_debugcheck_after()
3098 cachep->ctor(objp); in cache_alloc_debugcheck_after()
3110 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) in ____cache_alloc() argument
3117 ac = cpu_cache_get(cachep); in ____cache_alloc()
3122 STATS_INC_ALLOCHIT(cachep); in ____cache_alloc()
3126 STATS_INC_ALLOCMISS(cachep); in ____cache_alloc()
3127 objp = cache_alloc_refill(cachep, flags); in ____cache_alloc()
3132 ac = cpu_cache_get(cachep); in ____cache_alloc()
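____cache_alloc() is the common fast path: a lockless LIFO pop from the per-CPU array cache, falling back to cache_alloc_refill() on a miss. In outline, reconstructed from the fragments above:

    ac = cpu_cache_get(cachep);
    if (likely(ac->avail)) {
        ac->touched = 1;
        objp = ac->entry[--ac->avail];  /* LIFO pop, no list_lock taken */
        STATS_INC_ALLOCHIT(cachep);
    } else {
        STATS_INC_ALLOCMISS(cachep);
        objp = cache_alloc_refill(cachep, flags);
        /* the refill may have swapped the per-cpu array cache (3132) */
        ac = cpu_cache_get(cachep);
    }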
3152 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) in alternate_node_alloc() argument
3159 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) in alternate_node_alloc()
3164 return ____cache_alloc_node(cachep, flags, nid_alloc); in alternate_node_alloc()
3243 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, in ____cache_alloc_node() argument
3252 n = get_node(cachep, nodeid); in ____cache_alloc_node()
3261 check_spinlock_acquired_node(cachep, nodeid); in ____cache_alloc_node()
3263 STATS_INC_NODEALLOCS(cachep); in ____cache_alloc_node()
3264 STATS_INC_ACTIVE(cachep); in ____cache_alloc_node()
3265 STATS_SET_HIGH(cachep); in ____cache_alloc_node()
3267 BUG_ON(page->active == cachep->num); in ____cache_alloc_node()
3269 obj = slab_get_obj(cachep, page); in ____cache_alloc_node()
3272 fixup_slab_list(cachep, n, page, &list); in ____cache_alloc_node()
3275 fixup_objfreelist_debug(cachep, &list); in ____cache_alloc_node()
3280 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); in ____cache_alloc_node()
3283 obj = slab_get_obj(cachep, page); in ____cache_alloc_node()
3285 cache_grow_end(cachep, page); in ____cache_alloc_node()
3287 return obj ? obj : fallback_alloc(cachep, flags); in ____cache_alloc_node()
3291 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, in slab_alloc_node() argument
3299 cachep = slab_pre_alloc_hook(cachep, flags); in slab_alloc_node()
3300 if (unlikely(!cachep)) in slab_alloc_node()
3303 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc_node()
3309 if (unlikely(!get_node(cachep, nodeid))) { in slab_alloc_node()
3311 ptr = fallback_alloc(cachep, flags); in slab_alloc_node()
3322 ptr = ____cache_alloc(cachep, flags); in slab_alloc_node()
3327 ptr = ____cache_alloc_node(cachep, flags, nodeid); in slab_alloc_node()
3330 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); in slab_alloc_node()
3333 memset(ptr, 0, cachep->object_size); in slab_alloc_node()
3335 slab_post_alloc_hook(cachep, flags, 1, &ptr); in slab_alloc_node()
3364 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in __do_cache_alloc() argument
3366 return ____cache_alloc(cachep, flags); in __do_cache_alloc()
3372 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) in slab_alloc() argument
3378 cachep = slab_pre_alloc_hook(cachep, flags); in slab_alloc()
3379 if (unlikely(!cachep)) in slab_alloc()
3382 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc()
3384 objp = __do_cache_alloc(cachep, flags); in slab_alloc()
3386 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); in slab_alloc()
3390 memset(objp, 0, cachep->object_size); in slab_alloc()
3392 slab_post_alloc_hook(cachep, flags, 1, &objp); in slab_alloc()
3400 static void free_block(struct kmem_cache *cachep, void **objpp, in free_block() argument
3404 struct kmem_cache_node *n = get_node(cachep, node); in free_block()
3417 check_spinlock_acquired_node(cachep, node); in free_block()
3418 slab_put_obj(cachep, page, objp); in free_block()
3419 STATS_DEC_ACTIVE(cachep); in free_block()
3435 n->free_objects -= cachep->num; in free_block()
3444 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) in cache_flusharray() argument
3454 n = get_node(cachep, node); in cache_flusharray()
3469 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3481 STATS_SET_FREEABLE(cachep, i); in cache_flusharray()
3485 slabs_destroy(cachep, &list); in cache_flusharray()
3494 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, in __cache_free() argument
3498 if (kasan_slab_free(cachep, objp, _RET_IP_)) in __cache_free()
3501 ___cache_free(cachep, objp, caller); in __cache_free()
3504 void ___cache_free(struct kmem_cache *cachep, void *objp, in ___cache_free() argument
3507 struct array_cache *ac = cpu_cache_get(cachep); in ___cache_free()
3510 kmemleak_free_recursive(objp, cachep->flags); in ___cache_free()
3511 objp = cache_free_debugcheck(cachep, objp, caller); in ___cache_free()
3520 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) in ___cache_free()
3524 STATS_INC_FREEHIT(cachep); in ___cache_free()
3526 STATS_INC_FREEMISS(cachep); in ___cache_free()
3527 cache_flusharray(cachep, ac); in ___cache_free()
3534 cache_free_pfmemalloc(cachep, page, objp); in ___cache_free()
3550 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in kmem_cache_alloc() argument
3552 void *ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc()
3554 kasan_slab_alloc(cachep, ret, flags); in kmem_cache_alloc()
3556 cachep->object_size, cachep->size, flags); in kmem_cache_alloc()
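From the caller's side the contract is small; a hypothetical use of the foo_cache sketched earlier:

    struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
    if (!f)
        return -ENOMEM;
    /* ... use f ... */
    kmem_cache_free(foo_cache, f);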
3614 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) in kmem_cache_alloc_trace() argument
3618 ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc_trace()
3620 kasan_kmalloc(cachep, ret, size, flags); in kmem_cache_alloc_trace()
3622 size, cachep->size, flags); in kmem_cache_alloc_trace()
3640 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) in kmem_cache_alloc_node() argument
3642 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node()
3644 kasan_slab_alloc(cachep, ret, flags); in kmem_cache_alloc_node()
3646 cachep->object_size, cachep->size, in kmem_cache_alloc_node()
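The _node variant pins the allocation to a NUMA node, e.g. to keep per-device state near the device that owns it (pdev is hypothetical):

    int nid = dev_to_node(&pdev->dev);
    struct foo *f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);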
3654 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, in kmem_cache_alloc_node_trace() argument
3661 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node_trace()
3663 kasan_kmalloc(cachep, ret, size, flags); in kmem_cache_alloc_node_trace()
3665 size, cachep->size, in kmem_cache_alloc_node_trace()
3675 struct kmem_cache *cachep; in __do_kmalloc_node() local
3678 cachep = kmalloc_slab(size, flags); in __do_kmalloc_node()
3679 if (unlikely(ZERO_OR_NULL_PTR(cachep))) in __do_kmalloc_node()
3680 return cachep; in __do_kmalloc_node()
3681 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size); in __do_kmalloc_node()
3682 kasan_kmalloc(cachep, ret, size, flags); in __do_kmalloc_node()
3710 struct kmem_cache *cachep; in __do_kmalloc() local
3713 cachep = kmalloc_slab(size, flags); in __do_kmalloc()
3714 if (unlikely(ZERO_OR_NULL_PTR(cachep))) in __do_kmalloc()
3715 return cachep; in __do_kmalloc()
3716 ret = slab_alloc(cachep, flags, caller); in __do_kmalloc()
3718 kasan_kmalloc(cachep, ret, size, flags); in __do_kmalloc()
3720 size, cachep->size, flags); in __do_kmalloc()
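__do_kmalloc() makes plain that kmalloc() is a size-indexed cache lookup (kmalloc_slab()) followed by the same slab_alloc() path. For instance, a 100-byte request is served from the kmalloc-128 cache:

    void *p = kmalloc(100, GFP_KERNEL);  /* comes from kmalloc-128 */
    /* ksize(p) would report 128, the usable size of the slot */
    kfree(p);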
3745 void kmem_cache_free(struct kmem_cache *cachep, void *objp) in kmem_cache_free() argument
3748 cachep = cache_from_obj(cachep, objp); in kmem_cache_free()
3749 if (!cachep) in kmem_cache_free()
3753 debug_check_no_locks_freed(objp, cachep->object_size); in kmem_cache_free()
3754 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) in kmem_cache_free()
3755 debug_check_no_obj_freed(objp, cachep->object_size); in kmem_cache_free()
3756 __cache_free(cachep, objp, _RET_IP_); in kmem_cache_free()
3821 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) in setup_kmem_cache_nodes() argument
3828 ret = setup_kmem_cache_node(cachep, node, gfp, true); in setup_kmem_cache_nodes()
3837 if (!cachep->list.next) { in setup_kmem_cache_nodes()
3841 n = get_node(cachep, node); in setup_kmem_cache_nodes()
3846 cachep->node[node] = NULL; in setup_kmem_cache_nodes()
3855 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, in __do_tune_cpucache() argument
3861 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); in __do_tune_cpucache()
3865 prev = cachep->cpu_cache; in __do_tune_cpucache()
3866 cachep->cpu_cache = cpu_cache; in __do_tune_cpucache()
3875 cachep->batchcount = batchcount; in __do_tune_cpucache()
3876 cachep->limit = limit; in __do_tune_cpucache()
3877 cachep->shared = shared; in __do_tune_cpucache()
3889 n = get_node(cachep, node); in __do_tune_cpucache()
3891 free_block(cachep, ac->entry, ac->avail, node, &list); in __do_tune_cpucache()
3893 slabs_destroy(cachep, &list); in __do_tune_cpucache()
3898 return setup_kmem_cache_nodes(cachep, gfp); in __do_tune_cpucache()
3901 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, in do_tune_cpucache() argument
3907 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in do_tune_cpucache()
3912 if ((ret < 0) || !is_root_cache(cachep)) in do_tune_cpucache()
3916 for_each_memcg_cache(c, cachep) { in do_tune_cpucache()
3925 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) in enable_cpucache() argument
3932 err = cache_random_seq_create(cachep, cachep->num, gfp); in enable_cpucache()
3936 if (!is_root_cache(cachep)) { in enable_cpucache()
3937 struct kmem_cache *root = memcg_root_cache(cachep); in enable_cpucache()
3954 if (cachep->size > 131072) in enable_cpucache()
3956 else if (cachep->size > PAGE_SIZE) in enable_cpucache()
3958 else if (cachep->size > 1024) in enable_cpucache()
3960 else if (cachep->size > 256) in enable_cpucache()
3975 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) in enable_cpucache()
3988 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in enable_cpucache()
3992 cachep->name, -err); in enable_cpucache()
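The size comparisons matched at 3954-3960 are enable_cpucache()'s default per-CPU limit ladder; reconstructed (limit values as recalled for this version of the file; batchcount is then derived as (limit + 1) / 2, and shared becomes 8 for small caches on SMP, per 3975):

    if (cachep->size > 131072)
        limit = 1;
    else if (cachep->size > PAGE_SIZE)
        limit = 8;
    else if (cachep->size > 1024)
        limit = 24;
    else if (cachep->size > 256)
        limit = 54;
    else
        limit = 120;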
4001 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, in drain_array() argument
4018 drain_array_locked(cachep, ac, node, false, &list); in drain_array()
4021 slabs_destroy(cachep, &list); in drain_array()
4093 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) in get_slabinfo() argument
4101 for_each_kmem_cache_node(cachep, node, n) { in get_slabinfo()
4114 num_objs = total_slabs * cachep->num; in get_slabinfo()
4123 sinfo->limit = cachep->limit; in get_slabinfo()
4124 sinfo->batchcount = cachep->batchcount; in get_slabinfo()
4125 sinfo->shared = cachep->shared; in get_slabinfo()
4126 sinfo->objects_per_slab = cachep->num; in get_slabinfo()
4127 sinfo->cache_order = cachep->gfporder; in get_slabinfo()
4130 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) in slabinfo_show_stats() argument
4134 unsigned long high = cachep->high_mark; in slabinfo_show_stats()
4135 unsigned long allocs = cachep->num_allocations; in slabinfo_show_stats()
4136 unsigned long grown = cachep->grown; in slabinfo_show_stats()
4137 unsigned long reaped = cachep->reaped; in slabinfo_show_stats()
4138 unsigned long errors = cachep->errors; in slabinfo_show_stats()
4139 unsigned long max_freeable = cachep->max_freeable; in slabinfo_show_stats()
4140 unsigned long node_allocs = cachep->node_allocs; in slabinfo_show_stats()
4141 unsigned long node_frees = cachep->node_frees; in slabinfo_show_stats()
4142 unsigned long overflows = cachep->node_overflow; in slabinfo_show_stats()
4151 unsigned long allochit = atomic_read(&cachep->allochit); in slabinfo_show_stats()
4152 unsigned long allocmiss = atomic_read(&cachep->allocmiss); in slabinfo_show_stats()
4153 unsigned long freehit = atomic_read(&cachep->freehit); in slabinfo_show_stats()
4154 unsigned long freemiss = atomic_read(&cachep->freemiss); in slabinfo_show_stats()
4175 struct kmem_cache *cachep; in slabinfo_write() local
4194 list_for_each_entry(cachep, &slab_caches, list) { in slabinfo_write()
4195 if (!strcmp(cachep->name, kbuf)) { in slabinfo_write()
4200 res = do_tune_cpucache(cachep, limit, in slabinfo_write()
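slabinfo_write() backs the writable side of /proc/slabinfo; the accepted line format, applied through do_tune_cpucache():

    /*
     * From userspace (root), e.g.:
     *   echo "kmalloc-128 120 60 8" > /proc/slabinfo
     * parsed as: <cache name> <limit> <batchcount> <shared>
     */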
4299 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); in leaks_show() local
4307 if (!(cachep->flags & SLAB_STORE_USER)) in leaks_show()
4309 if (!(cachep->flags & SLAB_RED_ZONE)) in leaks_show()
4319 set_store_user_clean(cachep); in leaks_show()
4320 drain_cpu_caches(cachep); in leaks_show()
4324 for_each_kmem_cache_node(cachep, node, n) { in leaks_show()
4330 handle_slab(x, cachep, page); in leaks_show()
4332 handle_slab(x, cachep, page); in leaks_show()
4335 } while (!is_store_user_clean(cachep)); in leaks_show()
4337 name = cachep->name; in leaks_show()
4414 struct kmem_cache *cachep; in __check_heap_object() local
4419 cachep = page->slab_cache; in __check_heap_object()
4420 objnr = obj_to_index(cachep, page, (void *)ptr); in __check_heap_object()
4421 BUG_ON(objnr >= cachep->num); in __check_heap_object()
4424 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); in __check_heap_object()
4427 if (offset >= cachep->useroffset && in __check_heap_object()
4428 offset - cachep->useroffset <= cachep->usersize && in __check_heap_object()
4429 n <= cachep->useroffset - offset + cachep->usersize) in __check_heap_object()
4439 offset <= cachep->object_size && in __check_heap_object()
4440 n <= cachep->object_size - offset) { in __check_heap_object()
4441 usercopy_warn("SLAB object", cachep->name, to_user, offset, n); in __check_heap_object()
4445 usercopy_abort("SLAB object", cachep->name, to_user, offset, n); in __check_heap_object()
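__check_heap_object() enforces each cache's usercopy window (useroffset/usersize); copies that stray outside it warn or abort. Caches opt in via kmem_cache_create_usercopy(); a hypothetical whitelist exposing only the buf field:

    struct foo {
        refcount_t ref;
        char buf[64];
    };

    foo_cache = kmem_cache_create_usercopy("foo_cache",
                    sizeof(struct foo), 0, 0,
                    offsetof(struct foo, buf),          /* useroffset */
                    sizeof(((struct foo *)0)->buf),     /* usersize  */
                    NULL);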