Lines Matching +full:hall +full:- +full:enable

1 // SPDX-License-Identifier: GPL-2.0
7 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
9 * Major cleanup, different bufctl logic, per-cpu arrays
17 * Pub: Prentice Hall ISBN 0-13-101908-2
19 * The Slab Allocator: An Object-Caching Kernel Memory Allocator
48 * Each cache has a short per-cpu head array, most allocs
54 * The c_cpuarray may not be read with enabled local interrupts -
61 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
62 * and local interrupts are disabled so slab code is preempt-safe.
63 * The non-constant members are protected with a per-cache irq spinlock.
65 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
66 * in 2000 - many ideas in the current implementation are derived from
71 * 11 April '97. Started multi-threading - markhe
72 * The global cache-chain is protected by the mutex 'slab_mutex'.
73 * The sem is only needed when accessing/extending the cache-chain, which
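The per-cpu head array described in these comments is, in essence, a small LIFO stack of object pointers that is only touched with local interrupts disabled. A minimal sketch of the idea follows, with field names matching init_arraycache() further down; the pop helper is illustrative and not a function from this file:

struct array_cache {
	unsigned int avail;		/* objects currently cached */
	unsigned int limit;		/* capacity of entry[] */
	unsigned int batchcount;	/* objects moved per refill/flush */
	unsigned int touched;		/* recently used (hint for the reaper) */
	void *entry[];			/* LIFO stack of object pointers */
};

/* Illustrative fast path: local interrupts must already be disabled. */
static inline void *ac_pop(struct array_cache *ac)
{
	if (ac->avail) {
		ac->touched = 1;
		return ac->entry[--ac->avail];	/* hand out a cache-warm object */
	}
	return NULL;				/* caller must refill from the node lists */
}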
113 #include <linux/fault-inject.h>
134 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
137 * STATS - 1 to collect stats for /proc/slabinfo.
140 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
170 #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
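As a worked example, assuming freelist_idx_t is a single byte (the common configuration), the bound above evaluates to:

SLAB_OBJ_MAX_NUM = (1 << (1 * 8)) - 1 = 255	/* at most 255 objects indexable per slab */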
176 * - LIFO ordering, to hand out cache-warm objects from _alloc
177 * - reduce the number of linked list operations
178 * - reduce spinlock operations
180 * The limit is stored in the per-cpu structure to reduce the data cache
228 INIT_LIST_HEAD(&parent->slabs_full); in kmem_cache_node_init()
229 INIT_LIST_HEAD(&parent->slabs_partial); in kmem_cache_node_init()
230 INIT_LIST_HEAD(&parent->slabs_free); in kmem_cache_node_init()
231 parent->total_slabs = 0; in kmem_cache_node_init()
232 parent->free_slabs = 0; in kmem_cache_node_init()
233 parent->shared = NULL; in kmem_cache_node_init()
234 parent->alien = NULL; in kmem_cache_node_init()
235 parent->colour_next = 0; in kmem_cache_node_init()
236 spin_lock_init(&parent->list_lock); in kmem_cache_node_init()
237 parent->free_objects = 0; in kmem_cache_node_init()
238 parent->free_touched = 0; in kmem_cache_node_init()
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
249 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
250 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
251 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
256 #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
257 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
271 #define STATS_INC_ACTIVE(x) ((x)->num_active++)
272 #define STATS_DEC_ACTIVE(x) ((x)->num_active--)
273 #define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
274 #define STATS_INC_GROWN(x) ((x)->grown++)
275 #define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
278 if ((x)->num_active > (x)->high_mark) \
279 (x)->high_mark = (x)->num_active; \
281 #define STATS_INC_ERR(x) ((x)->errors++)
282 #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
283 #define STATS_INC_NODEFREES(x) ((x)->node_frees++)
284 #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
287 if ((x)->max_freeable < i) \
288 (x)->max_freeable = i; \
290 #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
291 #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
292 #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
293 #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
322 * cachep->obj_offset: The real object.
 323 * cachep->size - 2 * BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]

324 * cachep->size - 1* BYTES_PER_WORD: last caller address
329 return cachep->obj_offset; in obj_offset()
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone1()
335 return (unsigned long long*) (objp + obj_offset(cachep) - in dbg_redzone1()
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone2()
342 if (cachep->flags & SLAB_STORE_USER) in dbg_redzone2()
343 return (unsigned long long *)(objp + cachep->size - in dbg_redzone2()
344 sizeof(unsigned long long) - in dbg_redzone2()
346 return (unsigned long long *) (objp + cachep->size - in dbg_redzone2()
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); in dbg_userword()
353 return (void **)(objp + cachep->size - BYTES_PER_WORD); in dbg_userword()
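In a DEBUG build, the three helpers above are the only sanctioned way to reach the guard words and the last-caller slot. A hedged sketch of how a consistency check might use them (illustrative only; the file's own verification lives in the *_debugcheck routines):

/* Illustrative: objp is the start of the whole debug-padded slot. */
static void check_redzones(struct kmem_cache *cachep, void *objp)
{
	if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
	    *dbg_redzone2(cachep, objp) != RED_ACTIVE)
		pr_err("slab: red zone overwritten in cache %s\n",
		       cachep->name);
}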
377 return page->s_mem + cache->size * idx; in index_to_obj()
394 return this_cpu_ptr(cachep->cpu_cache); in cpu_cache_get()
398 * Calculate the number of objects and left-over bytes for a given buffer size.
411 * - @buffer_size bytes for each object in cache_estimate()
412 * - One freelist_idx_t for each object in cache_estimate()
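Following those two bullets, the on-slab estimate reduces to a single division. A sketch under the assumption that the freelist index array shares the slab with the objects (the off-slab and objfreelist cases divide by buffer_size alone):

static unsigned int estimate_objs(unsigned long gfporder, size_t buffer_size,
				  size_t *left_over)
{
	size_t slab_size = PAGE_SIZE << gfporder;
	/* each object costs its own bytes plus one freelist index */
	unsigned int num = slab_size / (buffer_size + sizeof(freelist_idx_t));

	*left_over = slab_size - num * (buffer_size + sizeof(freelist_idx_t));
	return num;
}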
442 function, cachep->name, msg); in __slab_error()
468 min(slab_max_order, MAX_ORDER - 1); in slab_max_order_setup()
514 if (reap_work->work.func == NULL) { in start_cpu_timer()
525 ac->avail = 0; in init_arraycache()
526 ac->limit = limit; in init_arraycache()
527 ac->batchcount = batch; in init_arraycache()
528 ac->touched = 0; in init_arraycache()
561 spin_lock(&n->list_lock); in cache_free_pfmemalloc()
563 spin_unlock(&n->list_lock); in cache_free_pfmemalloc()
578 int nr = min3(from->avail, max, to->limit - to->avail); in transfer_objects()
 583 memcpy(to->entry + to->avail, from->entry + from->avail - nr, in transfer_objects()
586 from->avail -= nr; in transfer_objects()
587 to->avail += nr; in transfer_objects()
591 /* &alien->lock must be held by alien callers. */
594 /* Avoid trivial double-free. */ in __free_one()
596 WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp)) in __free_one()
598 ac->entry[ac->avail++] = objp; in __free_one()
652 init_arraycache(&alc->ac, entries, batch); in __alloc_alien_cache()
653 spin_lock_init(&alc->lock); in __alloc_alien_cache()
674 for (i--; i >= 0; i--) in alloc_alien_cache()
700 if (ac->avail) { in __drain_alien_cache()
701 spin_lock(&n->list_lock); in __drain_alien_cache()
707 if (n->shared) in __drain_alien_cache()
708 transfer_objects(n->shared, ac, ac->limit); in __drain_alien_cache()
710 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
711 ac->avail = 0; in __drain_alien_cache()
712 spin_unlock(&n->list_lock); in __drain_alien_cache()
723 if (n->alien) { in reap_alien()
724 struct alien_cache *alc = n->alien[node]; in reap_alien()
728 ac = &alc->ac; in reap_alien()
729 if (ac->avail && spin_trylock_irq(&alc->lock)) { in reap_alien()
733 spin_unlock_irq(&alc->lock); in reap_alien()
753 ac = &alc->ac; in drain_alien_cache()
754 spin_lock_irqsave(&alc->lock, flags); in drain_alien_cache()
756 spin_unlock_irqrestore(&alc->lock, flags); in drain_alien_cache()
772 if (n->alien && n->alien[page_node]) { in __cache_free_alien()
773 alien = n->alien[page_node]; in __cache_free_alien()
774 ac = &alien->ac; in __cache_free_alien()
775 spin_lock(&alien->lock); in __cache_free_alien()
776 if (unlikely(ac->avail == ac->limit)) { in __cache_free_alien()
781 spin_unlock(&alien->lock); in __cache_free_alien()
785 spin_lock(&n->list_lock); in __cache_free_alien()
787 spin_unlock(&n->list_lock); in __cache_free_alien()
828 spin_lock_irq(&n->list_lock); in init_cache_node()
829 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + in init_cache_node()
830 cachep->num; in init_cache_node()
831 spin_unlock_irq(&n->list_lock); in init_cache_node()
838 return -ENOMEM; in init_cache_node()
841 n->next_reap = jiffies + REAPTIMEOUT_NODE + in init_cache_node()
844 n->free_limit = in init_cache_node()
845 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; in init_cache_node()
852 cachep->node[node] = n; in init_cache_node()
860 * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
861 * will be allocated off-node since memory is not yet online for the new node.
885 int ret = -ENOMEM; in setup_kmem_cache_node()
893 new_alien = alloc_alien_cache(node, cachep->limit, gfp); in setup_kmem_cache_node()
898 if (cachep->shared) { in setup_kmem_cache_node()
900 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); in setup_kmem_cache_node()
910 spin_lock_irq(&n->list_lock); in setup_kmem_cache_node()
911 if (n->shared && force_change) { in setup_kmem_cache_node()
912 free_block(cachep, n->shared->entry, in setup_kmem_cache_node()
913 n->shared->avail, node, &list); in setup_kmem_cache_node()
914 n->shared->avail = 0; in setup_kmem_cache_node()
917 if (!n->shared || force_change) { in setup_kmem_cache_node()
918 old_shared = n->shared; in setup_kmem_cache_node()
919 n->shared = new_shared; in setup_kmem_cache_node()
923 if (!n->alien) { in setup_kmem_cache_node()
924 n->alien = new_alien; in setup_kmem_cache_node()
928 spin_unlock_irq(&n->list_lock); in setup_kmem_cache_node()
932 * To protect lockless access to n->shared during irq disabled context. in setup_kmem_cache_node()
933 * If n->shared isn't NULL in irq disabled context, accessing to it is in setup_kmem_cache_node()
934 * guaranteed to be valid until irq is re-enabled, because it will be in setup_kmem_cache_node()
967 spin_lock_irq(&n->list_lock); in cpuup_canceled()
970 n->free_limit -= cachep->batchcount; in cpuup_canceled()
973 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled()
974 free_block(cachep, nc->entry, nc->avail, node, &list); in cpuup_canceled()
975 nc->avail = 0; in cpuup_canceled()
978 spin_unlock_irq(&n->list_lock); in cpuup_canceled()
982 shared = n->shared; in cpuup_canceled()
984 free_block(cachep, shared->entry, in cpuup_canceled()
985 shared->avail, node, &list); in cpuup_canceled()
986 n->shared = NULL; in cpuup_canceled()
989 alien = n->alien; in cpuup_canceled()
990 n->alien = NULL; in cpuup_canceled()
992 spin_unlock_irq(&n->list_lock); in cpuup_canceled()
1045 return -ENOMEM; in cpuup_prepare()
1099 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1100 * Returns -EBUSY if all objects cannot be drained so that the node is not
1119 if (!list_empty(&n->slabs_full) || in drain_cache_node_node()
1120 !list_empty(&n->slabs_partial)) { in drain_cache_node_node()
1121 ret = -EBUSY; in drain_cache_node_node()
1135 nid = mnb->status_change_nid; in slab_memory_callback()
1176 spin_lock_init(&ptr->list_lock); in init_list()
1179 cachep->node[nodeid] = ptr; in init_list()
1191 cachep->node[node] = &init_kmem_cache_node[index + node]; in set_up_node()
1192 cachep->node[node]->next_reap = jiffies + in set_up_node()
1215 * Fragmentation resistance on low memory - only use bigger in kmem_cache_init()
1251 list_add(&kmem_cache->list, &slab_caches); in kmem_cache_init()
1342 cachep->name, cachep->size, cachep->gfporder); in slab_out_of_memory()
1347 spin_lock_irqsave(&n->list_lock, flags); in slab_out_of_memory()
1348 total_slabs = n->total_slabs; in slab_out_of_memory()
1349 free_slabs = n->free_slabs; in slab_out_of_memory()
1350 free_objs = n->free_objects; in slab_out_of_memory()
1351 spin_unlock_irqrestore(&n->list_lock, flags); in slab_out_of_memory()
1354 node, total_slabs - free_slabs, total_slabs, in slab_out_of_memory()
1355 (total_slabs * cachep->num) - free_objs, in slab_out_of_memory()
1356 total_slabs * cachep->num); in slab_out_of_memory()
1363 * kmem_cache_node ->list_lock.
1374 flags |= cachep->allocflags; in kmem_getpages()
1376 page = __alloc_pages_node(nodeid, flags, cachep->gfporder); in kmem_getpages()
1382 account_slab_page(page, cachep->gfporder, cachep); in kmem_getpages()
1396 int order = cachep->gfporder; in kmem_freepages()
1402 page->mapping = NULL; in kmem_freepages()
1404 if (current->reclaim_state) in kmem_freepages()
1405 current->reclaim_state->reclaimed_slab += 1 << order; in kmem_freepages()
1416 cachep = page->slab_cache; in kmem_rcu_free()
1425 (cachep->size % PAGE_SIZE) == 0) in is_debug_pagealloc_cache()
1437 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); in slab_kernel_map()
1448 int size = cachep->object_size; in poison_obj()
1452 *(unsigned char *)(addr + size - 1) = POISON_END; in poison_obj()
1473 if (!(error & (error - 1))) { in dump_line()
1492 if (cachep->flags & SLAB_RED_ZONE) { in print_objinfo()
1498 if (cachep->flags & SLAB_STORE_USER) in print_objinfo()
1501 size = cachep->object_size; in print_objinfo()
1502 for (i = 0; i < size && lines; i += 16, lines--) { in print_objinfo()
1506 limit = size - i; in print_objinfo()
1521 size = cachep->object_size; in check_poison_obj()
1525 if (i == size - 1) in check_poison_obj()
1533 print_tainted(), cachep->name, in check_poison_obj()
1541 limit = size - i; in check_poison_obj()
1559 objp = index_to_obj(cachep, page, objnr - 1); in check_poison_obj()
1564 if (objnr + 1 < cachep->num) { in check_poison_obj()
1580 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1581 poison_obj(cachep, page->freelist - obj_offset(cachep), in slab_destroy_debugcheck()
1585 for (i = 0; i < cachep->num; i++) { in slab_destroy_debugcheck()
1588 if (cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1592 if (cachep->flags & SLAB_RED_ZONE) { in slab_destroy_debugcheck()
1608 * slab_destroy - destroy and release all objects in a slab
1614 * kmem_cache_node ->list_lock is not held/needed.
1620 freelist = page->freelist; in slab_destroy()
1622 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) in slab_destroy()
1623 call_rcu(&page->rcu_head, kmem_rcu_free); in slab_destroy()
1632 kmem_cache_free(cachep->freelist_cache, freelist); in slab_destroy()
1644 list_del(&page->slab_list); in slabs_destroy()
1650 * calculate_slab_order - calculate size (page order) of slabs
1659 * towards high-order requests, this should be changed.
1661 * Return: number of left-over bytes in a slab
1698 if (freelist_cache->size > cachep->size / 2) in calculate_slab_order()
1702 /* Found something acceptable - save it away */ in calculate_slab_order()
1703 cachep->num = num; in calculate_slab_order()
1704 cachep->gfporder = gfporder; in calculate_slab_order()
1708 * A VFS-reclaimable slab tends to have most allocations in calculate_slab_order()
1710 * higher-order pages when we are unable to shrink dcache. in calculate_slab_order()
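Stripped of the off-slab handling, the order search is a loop over page orders that keeps the first order giving a non-zero object count and an acceptably small remainder. A sketch of that loop (the acceptance threshold shown here is an assumption; the real function also weighs allocation cost and reclaimability):

for (gfporder = 0; gfporder <= slab_max_order; gfporder++) {
	size_t remainder;
	unsigned int num = cache_estimate(gfporder, size, flags, &remainder);

	if (!num)
		continue;			/* object does not fit at this order */

	/* Found something acceptable - save it away */
	cachep->num = num;
	cachep->gfporder = gfporder;
	left_over = remainder;

	/* stop once internal fragmentation is small enough (assumed bound) */
	if (remainder * 8 <= (PAGE_SIZE << gfporder))
		break;
}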
1757 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); in setup_cpu_cache()
1758 if (!cachep->cpu_cache) in setup_cpu_cache()
1771 cachep->node[node] = kmalloc_node( in setup_cpu_cache()
1773 BUG_ON(!cachep->node[node]); in setup_cpu_cache()
1774 kmem_cache_node_init(cachep->node[node]); in setup_cpu_cache()
1778 cachep->node[numa_mem_id()]->next_reap = in setup_cpu_cache()
1782 cpu_cache_get(cachep)->avail = 0; in setup_cpu_cache()
1783 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1784 cpu_cache_get(cachep)->batchcount = 1; in setup_cpu_cache()
1785 cpu_cache_get(cachep)->touched = 0; in setup_cpu_cache()
1786 cachep->batchcount = 1; in setup_cpu_cache()
1787 cachep->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1806 cachep->refcount++; in __kmem_cache_alias()
1812 cachep->object_size = max_t(int, cachep->object_size, size); in __kmem_cache_alias()
1822 cachep->num = 0; in set_objfreelist_slab_cache()
1825 * If slab auto-initialization on free is enabled, store the freelist in set_objfreelist_slab_cache()
1826 * off-slab, so that its contents don't end up in one of the allocated in set_objfreelist_slab_cache()
1832 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) in set_objfreelist_slab_cache()
1837 if (!cachep->num) in set_objfreelist_slab_cache()
1840 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) in set_objfreelist_slab_cache()
1843 cachep->colour = left / cachep->colour_off; in set_objfreelist_slab_cache()
1853 cachep->num = 0; in set_off_slab_cache()
1856 * Always use on-slab management when SLAB_NOLEAKTRACE in set_off_slab_cache()
1864 * off-slab (should allow better packing of objs). in set_off_slab_cache()
1867 if (!cachep->num) in set_off_slab_cache()
1871 * If the slab has been placed off-slab, and we have enough space then in set_off_slab_cache()
1872 * move it on-slab. This is at the expense of any extra colouring. in set_off_slab_cache()
1874 if (left >= cachep->num * sizeof(freelist_idx_t)) in set_off_slab_cache()
1877 cachep->colour = left / cachep->colour_off; in set_off_slab_cache()
1887 cachep->num = 0; in set_on_slab_cache()
1890 if (!cachep->num) in set_on_slab_cache()
1893 cachep->colour = left / cachep->colour_off; in set_on_slab_cache()
1899 * __kmem_cache_create - Create a cache.
1909 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1912 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1915 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
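As a usage sketch of these flags, a cache for a hypothetical struct foo with hardware-cacheline alignment, plus poisoning and red zones for debug kernels, could be set up like this (foo_cache and struct foo are illustrative names, not from this file):

static struct kmem_cache *foo_cache;	/* hypothetical cache */

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN | SLAB_POISON |
				      SLAB_RED_ZONE, NULL);
	return foo_cache ? 0 : -ENOMEM;
}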
1926 unsigned int size = cachep->size; in __kmem_cache_create()
1931 * Enable redzoning and last user accounting, except for caches with in __kmem_cache_create()
 1936 if (size < 4096 || fls(size - 1) == fls(size - 1 + REDZONE_ALIGN + in __kmem_cache_create()
1947 * sure any on-slab bufctl's are also correctly aligned. in __kmem_cache_create()
1959 if (ralign < cachep->align) { in __kmem_cache_create()
1960 ralign = cachep->align; in __kmem_cache_create()
1968 cachep->align = ralign; in __kmem_cache_create()
1969 cachep->colour_off = cache_line_size(); in __kmem_cache_create()
1971 if (cachep->colour_off < cachep->align) in __kmem_cache_create()
1972 cachep->colour_off = cachep->align; in __kmem_cache_create()
1982 * Both debugging options require word-alignment which is calculated in __kmem_cache_create()
1987 cachep->obj_offset += sizeof(unsigned long long); in __kmem_cache_create()
2004 size = ALIGN(size, cachep->align); in __kmem_cache_create()
2010 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); in __kmem_cache_create()
2014 * To activate debug pagealloc, off-slab management is necessary in __kmem_cache_create()
2021 size >= 256 && cachep->object_size > cache_line_size()) { in __kmem_cache_create()
2027 cachep->obj_offset += tmp_size - size; in __kmem_cache_create()
2048 return -E2BIG; in __kmem_cache_create()
2051 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); in __kmem_cache_create()
2052 cachep->flags = flags; in __kmem_cache_create()
2053 cachep->allocflags = __GFP_COMP; in __kmem_cache_create()
2055 cachep->allocflags |= GFP_DMA; in __kmem_cache_create()
2057 cachep->allocflags |= GFP_DMA32; in __kmem_cache_create()
2059 cachep->allocflags |= __GFP_RECLAIMABLE; in __kmem_cache_create()
2060 cachep->size = size; in __kmem_cache_create()
2061 cachep->reciprocal_buffer_size = reciprocal_value(size); in __kmem_cache_create()
2070 (cachep->flags & SLAB_POISON) && in __kmem_cache_create()
2072 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); in __kmem_cache_create()
2076 cachep->freelist_cache = in __kmem_cache_create()
2077 kmalloc_slab(cachep->freelist_size, 0u); in __kmem_cache_create()
2109 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); in check_spinlock_acquired()
2117 assert_spin_locked(&get_node(cachep, node)->list_lock); in check_spinlock_acquired_node()
2134 if (!ac || !ac->avail) in drain_array_locked()
2137 tofree = free_all ? ac->avail : (ac->limit + 4) / 5; in drain_array_locked()
2138 if (tofree > ac->avail) in drain_array_locked()
2139 tofree = (ac->avail + 1) / 2; in drain_array_locked()
2141 free_block(cachep, ac->entry, tofree, node, list); in drain_array_locked()
2142 ac->avail -= tofree; in drain_array_locked()
2143 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); in drain_array_locked()
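As a worked example of the lines above: with limit = 120 and 30 objects available, a non-forced drain frees (120 + 4) / 5 = 24 objects per pass; if only 10 were available, the cap kicks in and (10 + 1) / 2 = 5 are freed, after which the surviving pointers are shifted back to the front of the entry[] array.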
2157 spin_lock(&n->list_lock); in do_drain()
2158 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2159 spin_unlock(&n->list_lock); in do_drain()
2160 ac->avail = 0; in do_drain()
2173 if (n->alien) in drain_cpu_caches()
2174 drain_alien_cache(cachep, n->alien); in drain_cpu_caches()
2177 spin_lock_irq(&n->list_lock); in drain_cpu_caches()
2178 drain_array_locked(cachep, n->shared, node, true, &list); in drain_cpu_caches()
2179 spin_unlock_irq(&n->list_lock); in drain_cpu_caches()
2199 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { in drain_freelist()
2201 spin_lock_irq(&n->list_lock); in drain_freelist()
2202 p = n->slabs_free.prev; in drain_freelist()
2203 if (p == &n->slabs_free) { in drain_freelist()
2204 spin_unlock_irq(&n->list_lock); in drain_freelist()
2209 list_del(&page->slab_list); in drain_freelist()
2210 n->free_slabs--; in drain_freelist()
2211 n->total_slabs--; in drain_freelist()
2216 n->free_objects -= cache->num; in drain_freelist()
2217 spin_unlock_irq(&n->list_lock); in drain_freelist()
2231 if (!list_empty(&n->slabs_full) || in __kmem_cache_empty()
2232 !list_empty(&n->slabs_partial)) in __kmem_cache_empty()
2249 ret += !list_empty(&n->slabs_full) || in __kmem_cache_shrink()
2250 !list_empty(&n->slabs_partial); in __kmem_cache_shrink()
2267 free_percpu(cachep->cpu_cache); in __kmem_cache_release()
2271 kfree(n->shared); in __kmem_cache_release()
2272 free_alien_cache(n->alien); in __kmem_cache_release()
2274 cachep->node[i] = NULL; in __kmem_cache_release()
2281 * For a slab cache when the slab descriptor is off-slab, the
 2286 * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
2287 * This is a "chicken-and-egg" problem.
2289 * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
2299 page->s_mem = addr + colour_off; in alloc_slabmgmt()
2300 page->active = 0; in alloc_slabmgmt()
2305 /* Slab management obj is off-slab. */ in alloc_slabmgmt()
2306 freelist = kmem_cache_alloc_node(cachep->freelist_cache, in alloc_slabmgmt()
2310 freelist = addr + (PAGE_SIZE << cachep->gfporder) - in alloc_slabmgmt()
2311 cachep->freelist_size; in alloc_slabmgmt()
2319 return ((freelist_idx_t *)page->freelist)[idx]; in get_free_obj()
2325 ((freelist_idx_t *)(page->freelist))[idx] = val; in set_free_obj()
2333 for (i = 0; i < cachep->num; i++) { in cache_init_objs_debug()
2336 if (cachep->flags & SLAB_STORE_USER) in cache_init_objs_debug()
2339 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2348 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { in cache_init_objs_debug()
2351 cachep->ctor(objp + obj_offset(cachep)); in cache_init_objs_debug()
2356 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2363 if (cachep->flags & SLAB_POISON) { in cache_init_objs_debug()
 2384 * return true if the pre-computed list is available, false otherwise.
2396 /* Use a random state if the pre-computed list is not available */ in freelist_state_initialize()
2397 if (!cachep->random_seq) { in freelist_state_initialize()
2398 prandom_seed_state(&state->rnd_state, rand); in freelist_state_initialize()
2401 state->list = cachep->random_seq; in freelist_state_initialize()
2402 state->count = count; in freelist_state_initialize()
2403 state->pos = rand % count; in freelist_state_initialize()
2412 if (state->pos >= state->count) in next_random_slot()
2413 state->pos = 0; in next_random_slot()
2414 return state->list[state->pos++]; in next_random_slot()
2420 swap(((freelist_idx_t *)page->freelist)[a], in swap_free_obj()
2421 ((freelist_idx_t *)page->freelist)[b]); in swap_free_obj()
2425 * Shuffle the freelist initialization state based on pre-computed lists.
2430 unsigned int objfreelist = 0, i, rand, count = cachep->num; in shuffle_freelist()
2442 objfreelist = count - 1; in shuffle_freelist()
2445 page->freelist = index_to_obj(cachep, page, objfreelist) + in shuffle_freelist()
2447 count--; in shuffle_freelist()
2452 * Later use a pre-computed list for speed. in shuffle_freelist()
2458 /* Fisher-Yates shuffle */ in shuffle_freelist()
2459 for (i = count - 1; i > 0; i--) { in shuffle_freelist()
2470 set_free_obj(page, cachep->num - 1, objfreelist); in shuffle_freelist()
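The shuffle above is the textbook Fisher-Yates algorithm applied to the freelist index array. A generic sketch with a caller-supplied random source instead of the pre-computed list (shuffle_indices and get_rand are illustrative, not from this file):

/* Illustrative Fisher-Yates shuffle; get_rand(i) returns a value in [0, i]. */
static void shuffle_indices(freelist_idx_t *list, unsigned int count,
			    unsigned int (*get_rand)(unsigned int))
{
	unsigned int i;

	for (i = count - 1; i > 0; i--)
		swap(list[i], list[get_rand(i)]);
}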
2495 page->freelist = index_to_obj(cachep, page, cachep->num - 1) + in cache_init_objs()
2499 for (i = 0; i < cachep->num; i++) { in cache_init_objs()
2504 if (DEBUG == 0 && cachep->ctor) { in cache_init_objs()
2506 cachep->ctor(objp); in cache_init_objs()
2519 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); in slab_get_obj()
2520 page->active++; in slab_get_obj()
2533 for (i = page->active; i < cachep->num; i++) { in slab_put_obj()
2536 cachep->name, objp); in slab_put_obj()
2541 page->active--; in slab_put_obj()
2542 if (!page->freelist) in slab_put_obj()
2543 page->freelist = objp + obj_offset(cachep); in slab_put_obj()
2545 set_free_obj(page, page->active, objnr); in slab_put_obj()
2556 page->slab_cache = cache; in slab_map_pages()
2557 page->freelist = freelist; in slab_map_pages()
2581 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); in cache_grow_begin()
2600 n->colour_next++; in cache_grow_begin()
2601 if (n->colour_next >= cachep->colour) in cache_grow_begin()
2602 n->colour_next = 0; in cache_grow_begin()
2604 offset = n->colour_next; in cache_grow_begin()
2605 if (offset >= cachep->colour) in cache_grow_begin()
2608 offset *= cachep->colour_off; in cache_grow_begin()
2612 * page_address() in the latter returns a non-tagged pointer, in cache_grow_begin()
2650 INIT_LIST_HEAD(&page->slab_list); in cache_grow_end()
2653 spin_lock(&n->list_lock); in cache_grow_end()
2654 n->total_slabs++; in cache_grow_end()
2655 if (!page->active) { in cache_grow_end()
2656 list_add_tail(&page->slab_list, &n->slabs_free); in cache_grow_end()
2657 n->free_slabs++; in cache_grow_end()
2662 n->free_objects += cachep->num - page->active; in cache_grow_end()
2663 spin_unlock(&n->list_lock); in cache_grow_end()
2672 * - detect bad pointers.
2673 * - POISON/RED_ZONE checking
2714 objp -= obj_offset(cachep); in cache_free_debugcheck()
2718 if (cachep->flags & SLAB_RED_ZONE) { in cache_free_debugcheck()
2723 if (cachep->flags & SLAB_STORE_USER) in cache_free_debugcheck()
2728 BUG_ON(objnr >= cachep->num); in cache_free_debugcheck()
2731 if (cachep->flags & SLAB_POISON) { in cache_free_debugcheck()
2751 objp = next - obj_offset(cachep); in fixup_objfreelist_debug()
2763 list_del(&page->slab_list); in fixup_slab_list()
2764 if (page->active == cachep->num) { in fixup_slab_list()
2765 list_add(&page->slab_list, &n->slabs_full); in fixup_slab_list()
2769 if (cachep->flags & SLAB_POISON) { in fixup_slab_list()
2770 void **objp = page->freelist; in fixup_slab_list()
2776 page->freelist = NULL; in fixup_slab_list()
2779 list_add(&page->slab_list, &n->slabs_partial); in fixup_slab_list()
2782 /* Try to find non-pfmemalloc slab if needed */
2796 if (n->free_objects > n->free_limit) { in get_valid_first_slab()
2802 list_del(&page->slab_list); in get_valid_first_slab()
2803 if (!page->active) { in get_valid_first_slab()
2804 list_add_tail(&page->slab_list, &n->slabs_free); in get_valid_first_slab()
2805 n->free_slabs++; in get_valid_first_slab()
2807 list_add_tail(&page->slab_list, &n->slabs_partial); in get_valid_first_slab()
2809 list_for_each_entry(page, &n->slabs_partial, slab_list) { in get_valid_first_slab()
2814 n->free_touched = 1; in get_valid_first_slab()
2815 list_for_each_entry(page, &n->slabs_free, slab_list) { in get_valid_first_slab()
2817 n->free_slabs--; in get_valid_first_slab()
2829 assert_spin_locked(&n->list_lock); in get_first_slab()
2830 page = list_first_entry_or_null(&n->slabs_partial, struct page, in get_first_slab()
2833 n->free_touched = 1; in get_first_slab()
2834 page = list_first_entry_or_null(&n->slabs_free, struct page, in get_first_slab()
2837 n->free_slabs--; in get_first_slab()
2856 spin_lock(&n->list_lock); in cache_alloc_pfmemalloc()
2859 spin_unlock(&n->list_lock); in cache_alloc_pfmemalloc()
2864 n->free_objects--; in cache_alloc_pfmemalloc()
2868 spin_unlock(&n->list_lock); in cache_alloc_pfmemalloc()
2885 BUG_ON(page->active >= cachep->num); in alloc_block()
2887 while (page->active < cachep->num && batchcount--) { in alloc_block()
2892 ac->entry[ac->avail++] = slab_get_obj(cachep, page); in alloc_block()
2911 batchcount = ac->batchcount; in cache_alloc_refill()
2912 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { in cache_alloc_refill()
2922 BUG_ON(ac->avail > 0 || !n); in cache_alloc_refill()
2923 shared = READ_ONCE(n->shared); in cache_alloc_refill()
2924 if (!n->free_objects && (!shared || !shared->avail)) in cache_alloc_refill()
2927 spin_lock(&n->list_lock); in cache_alloc_refill()
2928 shared = READ_ONCE(n->shared); in cache_alloc_refill()
2932 shared->touched = 1; in cache_alloc_refill()
2949 n->free_objects -= ac->avail; in cache_alloc_refill()
2951 spin_unlock(&n->list_lock); in cache_alloc_refill()
2955 if (unlikely(!ac->avail)) { in cache_alloc_refill()
2971 if (!ac->avail && page) in cache_alloc_refill()
2975 if (!ac->avail) in cache_alloc_refill()
2978 ac->touched = 1; in cache_alloc_refill()
2980 return ac->entry[--ac->avail]; in cache_alloc_refill()
2993 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); in cache_alloc_debugcheck_after()
2996 if (cachep->flags & SLAB_POISON) { in cache_alloc_debugcheck_after()
3001 if (cachep->flags & SLAB_STORE_USER) in cache_alloc_debugcheck_after()
3004 if (cachep->flags & SLAB_RED_ZONE) { in cache_alloc_debugcheck_after()
3017 if (cachep->ctor && cachep->flags & SLAB_POISON) in cache_alloc_debugcheck_after()
3018 cachep->ctor(objp); in cache_alloc_debugcheck_after()
3020 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { in cache_alloc_debugcheck_after()
3038 if (likely(ac->avail)) { in ____cache_alloc()
3039 ac->touched = 1; in ____cache_alloc()
3040 objp = ac->entry[--ac->avail]; in ____cache_alloc()
3057 * per-CPU caches is leaked, we need to make sure kmemleak doesn't in ____cache_alloc()
3061 kmemleak_erase(&ac->entry[ac->avail]); in ____cache_alloc()
3079 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) in alternate_node_alloc()
3081 else if (current->mempolicy) in alternate_node_alloc()
3124 get_node(cache, nid)->free_objects) { in fallback_alloc()
 3161 * An interface to enable slab creation on nodeid
3176 spin_lock(&n->list_lock); in ____cache_alloc_node()
3187 BUG_ON(page->active == cachep->num); in ____cache_alloc_node()
3190 n->free_objects--; in ____cache_alloc_node()
3194 spin_unlock(&n->list_lock); in ____cache_alloc_node()
3199 spin_unlock(&n->list_lock); in ____cache_alloc_node()
3254 memset(ptr, 0, cachep->object_size); in slab_alloc_node()
3265 if (current->mempolicy || cpuset_do_slab_mem_spread()) { in __do_cache_alloc()
3312 memset(objp, 0, cachep->object_size); in slab_alloc()
3329 n->free_objects += nr_objects; in free_block()
3338 list_del(&page->slab_list); in free_block()
3344 if (page->active == 0) { in free_block()
3345 list_add(&page->slab_list, &n->slabs_free); in free_block()
3346 n->free_slabs++; in free_block()
3349 * partial list on free - maximum time for the in free_block()
3352 list_add_tail(&page->slab_list, &n->slabs_partial); in free_block()
3356 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) { in free_block()
3357 n->free_objects -= cachep->num; in free_block()
3359 page = list_last_entry(&n->slabs_free, struct page, slab_list); in free_block()
3360 list_move(&page->slab_list, list); in free_block()
3361 n->free_slabs--; in free_block()
3362 n->total_slabs--; in free_block()
3373 batchcount = ac->batchcount; in cache_flusharray()
3377 spin_lock(&n->list_lock); in cache_flusharray()
3378 if (n->shared) { in cache_flusharray()
3379 struct array_cache *shared_array = n->shared; in cache_flusharray()
3380 int max = shared_array->limit - shared_array->avail; in cache_flusharray()
3384 memcpy(&(shared_array->entry[shared_array->avail]), in cache_flusharray()
3385 ac->entry, sizeof(void *) * batchcount); in cache_flusharray()
3386 shared_array->avail += batchcount; in cache_flusharray()
3391 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3398 list_for_each_entry(page, &n->slabs_free, slab_list) { in cache_flusharray()
3399 BUG_ON(page->active); in cache_flusharray()
3406 spin_unlock(&n->list_lock); in cache_flusharray()
3407 ac->avail -= batchcount; in cache_flusharray()
3408 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); in cache_flusharray()
3423 /* Use KCSAN to help debug racy use-after-free. */ in __cache_free()
3424 if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU)) in __cache_free()
3425 __kcsan_check_access(objp, cachep->object_size, in __cache_free()
3438 memset(objp, 0, cachep->object_size); in ___cache_free()
3439 kmemleak_free_recursive(objp, cachep->flags); in ___cache_free()
3453 if (ac->avail < ac->limit) { in ___cache_free()
3473 * kmem_cache_alloc - Allocate an object
3487 cachep->object_size, cachep->size, flags); in kmem_cache_alloc()
3530 memset(p[i], 0, s->object_size); in kmem_cache_alloc_bulk()
3554 size, cachep->size, flags); in kmem_cache_alloc_trace()
3562 * kmem_cache_alloc_node - Allocate an object on the specified node
3579 cachep->object_size, cachep->size, in kmem_cache_alloc_node()
3598 size, cachep->size, in kmem_cache_alloc_node_trace()
3637 * __do_kmalloc - allocate memory
3659 size, cachep->size, flags); in __do_kmalloc()
3677 * kmem_cache_free - Deallocate an object
3692 debug_check_no_locks_freed(objp, cachep->object_size); in kmem_cache_free()
3693 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) in kmem_cache_free()
3694 debug_check_no_obj_freed(objp, cachep->object_size); in kmem_cache_free()
3718 debug_check_no_locks_freed(objp, s->object_size); in kmem_cache_free_bulk()
3719 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in kmem_cache_free_bulk()
3720 debug_check_no_obj_freed(objp, s->object_size); in kmem_cache_free_bulk()
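A usage sketch for the bulk interfaces above: allocate a small batch from a cache and return it in one call (foo_cache is the same hypothetical cache as in the earlier example):

void *objs[16];
int n;

n = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
if (n) {
	/* ... use objs[0..n-1] ... */
	kmem_cache_free_bulk(foo_cache, n, objs);
}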
3731 * kfree - free previously allocated memory
3755 debug_check_no_locks_freed(objp, c->object_size); in kfree()
3757 debug_check_no_obj_freed(objp, c->object_size); in kfree()
3782 if (!cachep->list.next) { in setup_kmem_cache_nodes()
3784 node--; in setup_kmem_cache_nodes()
3788 kfree(n->shared); in setup_kmem_cache_nodes()
3789 free_alien_cache(n->alien); in setup_kmem_cache_nodes()
3791 cachep->node[node] = NULL; in setup_kmem_cache_nodes()
3793 node--; in setup_kmem_cache_nodes()
3796 return -ENOMEM; in setup_kmem_cache_nodes()
3808 return -ENOMEM; in do_tune_cpucache()
3810 prev = cachep->cpu_cache; in do_tune_cpucache()
3811 cachep->cpu_cache = cpu_cache; in do_tune_cpucache()
3820 cachep->batchcount = batchcount; in do_tune_cpucache()
3821 cachep->limit = limit; in do_tune_cpucache()
3822 cachep->shared = shared; in do_tune_cpucache()
3835 spin_lock_irq(&n->list_lock); in do_tune_cpucache()
3836 free_block(cachep, ac->entry, ac->avail, node, &list); in do_tune_cpucache()
3837 spin_unlock_irq(&n->list_lock); in do_tune_cpucache()
3854 err = cache_random_seq_create(cachep, cachep->num, gfp); in enable_cpucache()
3862 * - create a LIFO ordering, i.e. return objects that are cache-warm in enable_cpucache()
3863 * - reduce the number of spinlock operations. in enable_cpucache()
3864 * - reduce the number of linked list operations on the slab and in enable_cpucache()
3866 * The numbers are guessed, we should auto-tune as described by in enable_cpucache()
3869 if (cachep->size > 131072) in enable_cpucache()
3871 else if (cachep->size > PAGE_SIZE) in enable_cpucache()
3873 else if (cachep->size > 1024) in enable_cpucache()
3875 else if (cachep->size > 256) in enable_cpucache()
3890 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) in enable_cpucache()
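The head-array size is picked from the object size with a handful of hard-coded break points, and a shared per-node array is only added for small objects on SMP (the line just above). The shape of the decision is roughly the following sketch, where the *_LIMIT names stand in for the constants on the unlisted branches of enable_cpucache():

/* Illustrative only: larger objects get a smaller per-cpu head array. */
if (cachep->size > 131072)
	limit = TINY_LIMIT;		/* huge objects: barely any caching */
else if (cachep->size > PAGE_SIZE)
	limit = SMALL_LIMIT;
else if (cachep->size > 1024)
	limit = MEDIUM_LIMIT;
else if (cachep->size > 256)
	limit = LARGE_LIMIT;
else
	limit = BIG_LIMIT;		/* small objects: largest head array */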
3907 cachep->name, -err); in enable_cpucache()
3921 /* ac from n->shared can be freed if we don't hold the slab_mutex. */ in drain_array()
3924 if (!ac || !ac->avail) in drain_array()
3927 if (ac->touched) { in drain_array()
3928 ac->touched = 0; in drain_array()
3932 spin_lock_irq(&n->list_lock); in drain_array()
3934 spin_unlock_irq(&n->list_lock); in drain_array()
3940 * cache_reap - Reclaim memory from caches.
3945 * - clear the per-cpu caches for this CPU.
3946 * - return freeable pages to the main free memory pool.
3948 * If we cannot acquire the cache chain mutex then just give up - we'll try
3980 if (time_after(n->next_reap, jiffies)) in cache_reap()
3983 n->next_reap = jiffies + REAPTIMEOUT_NODE; in cache_reap()
3985 drain_array(searchp, n, n->shared, node); in cache_reap()
3987 if (n->free_touched) in cache_reap()
3988 n->free_touched = 0; in cache_reap()
3992 freed = drain_freelist(searchp, n, (n->free_limit + in cache_reap()
3993 5 * searchp->num - 1) / (5 * searchp->num)); in cache_reap()
4018 spin_lock_irq(&n->list_lock); in get_slabinfo()
4020 total_slabs += n->total_slabs; in get_slabinfo()
4021 free_slabs += n->free_slabs; in get_slabinfo()
4022 free_objs += n->free_objects; in get_slabinfo()
4024 if (n->shared) in get_slabinfo()
4025 shared_avail += n->shared->avail; in get_slabinfo()
4027 spin_unlock_irq(&n->list_lock); in get_slabinfo()
4029 num_objs = total_slabs * cachep->num; in get_slabinfo()
4030 active_slabs = total_slabs - free_slabs; in get_slabinfo()
4031 active_objs = num_objs - free_objs; in get_slabinfo()
4033 sinfo->active_objs = active_objs; in get_slabinfo()
4034 sinfo->num_objs = num_objs; in get_slabinfo()
4035 sinfo->active_slabs = active_slabs; in get_slabinfo()
4036 sinfo->num_slabs = total_slabs; in get_slabinfo()
4037 sinfo->shared_avail = shared_avail; in get_slabinfo()
4038 sinfo->limit = cachep->limit; in get_slabinfo()
4039 sinfo->batchcount = cachep->batchcount; in get_slabinfo()
4040 sinfo->shared = cachep->shared; in get_slabinfo()
4041 sinfo->objects_per_slab = cachep->num; in get_slabinfo()
4042 sinfo->cache_order = cachep->gfporder; in get_slabinfo()
4049 unsigned long high = cachep->high_mark; in slabinfo_show_stats()
4050 unsigned long allocs = cachep->num_allocations; in slabinfo_show_stats()
4051 unsigned long grown = cachep->grown; in slabinfo_show_stats()
4052 unsigned long reaped = cachep->reaped; in slabinfo_show_stats()
4053 unsigned long errors = cachep->errors; in slabinfo_show_stats()
4054 unsigned long max_freeable = cachep->max_freeable; in slabinfo_show_stats()
4055 unsigned long node_allocs = cachep->node_allocs; in slabinfo_show_stats()
4056 unsigned long node_frees = cachep->node_frees; in slabinfo_show_stats()
4057 unsigned long overflows = cachep->node_overflow; in slabinfo_show_stats()
4066 unsigned long allochit = atomic_read(&cachep->allochit); in slabinfo_show_stats()
4067 unsigned long allocmiss = atomic_read(&cachep->allocmiss); in slabinfo_show_stats()
4068 unsigned long freehit = atomic_read(&cachep->freehit); in slabinfo_show_stats()
4069 unsigned long freemiss = atomic_read(&cachep->freemiss); in slabinfo_show_stats()
4079 * slabinfo_write - Tuning for the slab allocator
4095 return -EINVAL; in slabinfo_write()
4097 return -EFAULT; in slabinfo_write()
4102 return -EINVAL; in slabinfo_write()
4106 return -EINVAL; in slabinfo_write()
4110 res = -EINVAL; in slabinfo_write()
4112 if (!strcmp(cachep->name, kbuf)) { in slabinfo_write()
4149 cachep = page->slab_cache; in __check_heap_object()
4151 BUG_ON(objnr >= cachep->num); in __check_heap_object()
4154 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); in __check_heap_object()
4157 if (offset >= cachep->useroffset && in __check_heap_object()
4158 offset - cachep->useroffset <= cachep->usersize && in __check_heap_object()
4159 n <= cachep->useroffset - offset + cachep->usersize) in __check_heap_object()
4169 offset <= cachep->object_size && in __check_heap_object()
4170 n <= cachep->object_size - offset) { in __check_heap_object()
4171 usercopy_warn("SLAB object", cachep->name, to_user, offset, n); in __check_heap_object()
4175 usercopy_abort("SLAB object", cachep->name, to_user, offset, n); in __check_heap_object()
4180 * __ksize -- Uninstrumented ksize.
4198 size = c ? c->object_size : 0; in __ksize()