Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0
14 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
17 * and within each page, there is a singly-linked list of free blocks
22 * Allocation from heap involves first searching for a page with
23 * sufficient free blocks (using a next-fit-like approach) followed by
24 * a first-fit scan of the page. Deallocation inserts objects back
26 * address-ordered first fit.
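
The allocation path described above is two searches in a row: a next-fit-like walk over the partially free pages, then a first-fit scan of the blocks inside the chosen page. A minimal userspace sketch of that structure, with made-up toy_page/toy_alloc names standing in for the real slob structures and a fixed 8 units per page:

#include <stdio.h>

#define NPAGES 4

/* Toy stand-in for a slob page: a few fixed-size free "units". */
struct toy_page {
	int free_units;          /* total free units on this page */
	int used[8];             /* 1 = unit allocated, 0 = unit free */
};

static struct toy_page pages[NPAGES];
static int cursor;               /* next-fit: remember where the last search ended */

/* First-fit scan inside one page: find 'units' consecutive free slots. */
static int page_alloc(struct toy_page *p, int units)
{
	for (int i = 0; i + units <= 8; i++) {
		int ok = 1;

		for (int j = i; j < i + units; j++)
			ok &= !p->used[j];
		if (!ok)
			continue;
		for (int j = i; j < i + units; j++)
			p->used[j] = 1;
		p->free_units -= units;
		return i;
	}
	return -1;
}

/* Next-fit over pages: start at the cursor, skip pages that are too full. */
static int toy_alloc(int units, int *page_out)
{
	for (int n = 0; n < NPAGES; n++) {
		struct toy_page *p = &pages[(cursor + n) % NPAGES];

		if (p->free_units < units)
			continue;
		int off = page_alloc(p, units);

		if (off >= 0) {
			cursor = (cursor + n) % NPAGES; /* resume here next time */
			*page_out = cursor;
			return off;
		}
	}
	return -1;	/* caller would grab a fresh page here */
}

int main(void)
{
	for (int i = 0; i < NPAGES; i++)
		pages[i].free_units = 8;
	int pg, off = toy_alloc(3, &pg);

	printf("allocated 3 units at page %d, offset %d\n", pg, off);
	return 0;
}
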
29 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
31 * alloc_pages() directly, allocating compound pages so the page order
38 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
39 * case the low-level allocator will fragment blocks to create the proper
40 * alignment. Again, objects of page-size or greater are allocated by
41 * calling alloc_pages(). As SLAB objects know their size, no separate
42 * size bookkeeping is necessary and there is essentially no allocation
43 * space overhead, and compound pages aren't needed for multi-page
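
The kmalloc layer sketched in this header keeps the request size in a small header placed just in front of the returned block, and skips that header entirely for page-sized objects. A hedged userspace approximation of the header trick, using malloc() underneath; MINALIGN and the toy_* names are invented for the sketch and stand in for ARCH_KMALLOC_MINALIGN and the real kmalloc/kfree/ksize:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define MINALIGN sizeof(unsigned long)	/* stands in for ARCH_KMALLOC_MINALIGN */

/* Allocate size bytes plus a MINALIGN-sized header that records the size. */
static void *toy_kmalloc(size_t size)
{
	unsigned char *base = malloc(size + MINALIGN);

	if (!base)
		return NULL;
	*(size_t *)base = size;		/* header lives just below the object */
	return base + MINALIGN;
}

/* Recover the recorded size from the header in front of the object. */
static size_t toy_ksize(const void *obj)
{
	return *(const size_t *)((const unsigned char *)obj - MINALIGN);
}

static void toy_kfree(void *obj)
{
	if (obj)
		free((unsigned char *)obj - MINALIGN);
}

int main(void)
{
	char *p = toy_kmalloc(100);

	memset(p, 0, 100);
	printf("recorded size: %zu\n", toy_ksize(p));	/* prints 100 */
	toy_kfree(p);
	return 0;
}
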
47 * logic down to the page allocator, and simply doing the node accounting
55 * page flags. As a result, block allocations that can be satisfied from
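
The node-matching rule described here (satisfy a block allocation from the freelist only if the page already lives on the requested node) is essentially a filtered list walk. A rough sketch, with an invented node field in place of the node id the kernel reads back out of the page flags:

#include <stdio.h>

struct toy_page { int node; int free_units; };

/* Return the first page on the requested node with enough room, or NULL. */
static struct toy_page *find_on_node(struct toy_page *pages, int n,
				     int node, int units)
{
	for (int i = 0; i < n; i++) {
		if (node >= 0 && pages[i].node != node)
			continue;	/* wrong node: keep scanning */
		if (pages[i].free_units >= units)
			return &pages[i];
	}
	return NULL;	/* fall back to allocating a fresh page on 'node' */
}

int main(void)
{
	struct toy_page pages[] = { {0, 10}, {1, 10}, {1, 2} };
	struct toy_page *p = find_on_node(pages, 3, 1, 4);

	printf("picked page on node %d\n", p ? p->node : -1);
	return 0;
}
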
78 * slob_block has a field 'units', which indicates size of block if +ve,
79 * or offset of next block if -ve (in SLOB_UNITs).
81 * Free blocks of size 1 unit simply contain the offset of the next block.
82 * Those with larger size contain their size in the first SLOB_UNIT of
115 list_add(&slab->slab_list, list); in set_slob_page_free()
121 list_del(&slab->slab_list); in clear_slob_page_free()
126 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT) argument
135 int size; member
144 * Encode the given size and next info into a free slob block s.
146 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
149 slobidx_t offset = next - base; in set_slob()
151 if (size > 1) { in set_slob()
152 s[0].units = size; in set_slob()
155 s[0].units = -offset; in set_slob()
159 * Return the size of a slob block.
163 if (s->units > 0) in slob_units()
164 return s->units; in slob_units()
177 next = -s[0].units; in slob_next()
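
The fragments above implement the encoding described in the block comment: a free block of more than one unit stores its size in the first unit and the offset of the next free block in the second, while a one-unit block stores the negated next offset in its only unit. A standalone sketch of the same encoding, using a toy unit_t array instead of the real slob_t/page layout:

#include <stdio.h>

typedef short unit_t;		/* stand-in for the kernel's slob_t/slobidx_t */

/* Encode a free block of 'size' units whose next free block sits at
 * 'next_off' units from the page base, mirroring set_slob(). */
static void toy_set(unit_t *block, int size, int next_off)
{
	if (size > 1) {
		block[0] = size;	/* positive: this is the block size   */
		block[1] = next_off;	/* second unit holds the next pointer */
	} else {
		block[0] = -next_off;	/* one-unit block: negated next offset */
	}
}

/* Decode the size of a free block, mirroring slob_units(). */
static int toy_units(const unit_t *block)
{
	return block[0] > 0 ? block[0] : 1;
}

/* Decode the offset of the next free block, mirroring slob_next(). */
static int toy_next(const unit_t *block)
{
	return block[0] < 0 ? -block[0] : block[1];
}

int main(void)
{
	unit_t page[16] = { 0 };

	toy_set(&page[0], 3, 8);	/* 3-unit free block, next free block at unit 8 */
	toy_set(&page[8], 1, 12);	/* 1-unit free block, next at unit 12 */
	printf("block@0: %d units, next@%d\n", toy_units(&page[0]), toy_next(&page[0]));
	printf("block@8: %d units, next@%d\n", toy_units(&page[8]), toy_next(&page[8]));
	return 0;
}
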
184 * Returns true if s is the last free block in its page.
193 struct page *page; in slob_new_pages() local
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
200 page = alloc_pages(gfp, order); in slob_new_pages()
202 if (!page) in slob_new_pages()
205 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B, in slob_new_pages()
207 return page_address(page); in slob_new_pages()
212 struct page *sp = virt_to_page(b); in slob_free_pages()
214 if (current->reclaim_state) in slob_free_pages()
215 current->reclaim_state->reclaimed_slab += 1 << order; in slob_free_pages()
218 -(PAGE_SIZE << order)); in slob_free_pages()
223 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
224 * @sp: Page to look in.
225 * @size: Size of the allocation.
 230  * Tries to find a chunk of memory at least @size bytes big within @sp.
 233  *         allocation fills up @sp then the page is removed from the
237 static void *slob_page_alloc(struct slab *sp, size_t size, int align, in slob_page_alloc() argument
241 int delta = 0, units = SLOB_UNITS(size); in slob_page_alloc()
244 for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) { in slob_page_alloc()
251 * allocated block with its size, so that the block itself is in slob_page_alloc()
257 - align_offset); in slob_page_alloc()
258 delta = aligned - cur; in slob_page_alloc()
265 set_slob(aligned, avail - delta, next); in slob_page_alloc()
277 sp->freelist = next; in slob_page_alloc()
282 sp->freelist = cur + units; in slob_page_alloc()
283 set_slob(cur + units, avail - units, next); in slob_page_alloc()
286 sp->units -= units; in slob_page_alloc()
287 if (!sp->units) { in slob_page_alloc()
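
The delta computed around line 258 is the number of leading units slob_page_alloc() must leave in place so that the block, once the align_offset-sized header is skipped, lands on the requested alignment. The same pointer arithmetic in isolation (align must be a power of two; the example addresses are made up):

#include <stdint.h>
#include <stdio.h>

/* Round p up so that (p + align_offset) is 'align'-byte aligned, the same
 * trick slob_page_alloc() uses to keep the kmalloc header just in front of
 * an aligned payload. */
static uintptr_t align_up_with_offset(uintptr_t p, uintptr_t align,
				      uintptr_t align_offset)
{
	return ((p + align_offset + align - 1) & ~(align - 1)) - align_offset;
}

int main(void)
{
	uintptr_t block = 0x1004;	/* hypothetical start of a free block */
	uintptr_t aligned = align_up_with_offset(block, 64, 8);

	/* delta = bytes left in front as a smaller free block */
	printf("aligned start 0x%lx, delta %lu, payload 0x%lx\n",
	       (unsigned long)aligned, (unsigned long)(aligned - block),
	       (unsigned long)(aligned + 8));
	return 0;
}
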
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc() argument
311 if (size < SLOB_BREAK1) in slob_alloc()
313 else if (size < SLOB_BREAK2) in slob_alloc()
319 /* Iterate through each partially free page, try to find room */ in slob_alloc()
325 * page with a matching node id in the freelist. in slob_alloc()
330 /* Enough room on this page? */ in slob_alloc()
331 if (sp->units < SLOB_UNITS(size)) in slob_alloc()
334 b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list); in slob_alloc()
341 * did not fragment the page anyway so optimisation is in slob_alloc()
350 if (!list_is_first(&sp->slab_list, slob_list)) in slob_alloc()
351 list_rotate_to_front(&sp->slab_list, slob_list); in slob_alloc()
357 /* Not enough space: must allocate a new page */ in slob_alloc()
367 sp->units = SLOB_UNITS(PAGE_SIZE); in slob_alloc()
368 sp->freelist = b; in slob_alloc()
369 INIT_LIST_HEAD(&sp->slab_list); in slob_alloc()
372 b = slob_page_alloc(sp, size, align, align_offset, &_unused); in slob_alloc()
377 memset(b, 0, size); in slob_alloc()
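
slob_alloc() first picks one of the three segregated lists by request size (the SLOB_BREAK1/SLOB_BREAK2 cutoffs, 256 and 1024 bytes in the source) and, when a page other than the list head satisfies the request, rotates that page to the front so the next search resumes there. A simplified sketch of both steps; the singly linked move-to-front below stands in for the kernel's list_rotate_to_front() on a doubly linked list:

#include <stddef.h>
#include <stdio.h>

#define BREAK1 256	/* small/medium boundary, as in SLOB_BREAK1 */
#define BREAK2 1024	/* medium/large boundary, as in SLOB_BREAK2 */

struct toy_page { struct toy_page *next; int free_units; };

static struct toy_page *small_list, *medium_list, *large_list;

/* Pick the segregated freelist for a request, as slob_alloc() does. */
static struct toy_page **list_for(size_t size)
{
	if (size < BREAK1)
		return &small_list;
	if (size < BREAK2)
		return &medium_list;
	return &large_list;
}

/* Move the page that satisfied the request to the list head so the next
 * search starts from it (the next-fit effect of the kernel's rotation). */
static void move_to_front(struct toy_page **head, struct toy_page *sp)
{
	struct toy_page **pp = head;

	while (*pp && *pp != sp)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = sp->next;
		sp->next = *head;
		*head = sp;
	}
}

int main(void)
{
	struct toy_page a = { NULL, 2 }, b = { NULL, 50 };

	small_list = &a;
	a.next = &b;
	move_to_front(list_for(100), &b);	/* b satisfied a 100-byte request */
	printf("head now has %d free units\n", small_list->free_units);
	return 0;
}
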
384 static void slob_free(void *block, int size) in slob_free() argument
394 BUG_ON(!size); in slob_free()
397 units = SLOB_UNITS(size); in slob_free()
401 if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) { in slob_free()
402 /* Go directly to page allocator. Do not pass slob allocator */ in slob_free()
412 /* This slob page is about to become partially free. Easy! */ in slob_free()
413 sp->units = units; in slob_free()
414 sp->freelist = b; in slob_free()
418 if (size < SLOB_BREAK1) in slob_free()
420 else if (size < SLOB_BREAK2) in slob_free()
429 * Otherwise the page is already partially free, so find reinsertion in slob_free()
432 sp->units += units; in slob_free()
434 if (b < (slob_t *)sp->freelist) { in slob_free()
435 if (b + units == sp->freelist) { in slob_free()
436 units += slob_units(sp->freelist); in slob_free()
437 sp->freelist = slob_next(sp->freelist); in slob_free()
439 set_slob(b, units, sp->freelist); in slob_free()
440 sp->freelist = b; in slob_free()
442 prev = sp->freelist; in slob_free()
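
When the page is already partially free, slob_free() walks the page's address-ordered free list to the insertion point and merges the freed block with adjacent neighbours. A simplified sketch of that reinsertion using explicit start/len nodes; the kernel instead encodes the same information inside the freed units themselves:

#include <stdio.h>

/* Toy address-ordered free list: each node records its start unit and length. */
struct toy_free { int start, len; struct toy_free *next; };

/* Insert a freed range in address order and coalesce with both neighbours. */
static struct toy_free *free_insert(struct toy_free *head, struct toy_free *b)
{
	struct toy_free *prev = NULL, *cur = head;

	while (cur && cur->start < b->start) {
		prev = cur;
		cur = cur->next;
	}

	/* Merge with the block after us if the two ranges touch. */
	if (cur && b->start + b->len == cur->start) {
		b->len += cur->len;
		b->next = cur->next;
	} else {
		b->next = cur;
	}

	/* Merge with the block before us if the two ranges touch. */
	if (prev && prev->start + prev->len == b->start) {
		prev->len += b->len;
		prev->next = b->next;
		return head;
	}

	if (prev) {
		prev->next = b;
		return head;
	}
	return b;		/* freed block becomes the new list head */
}

int main(void)
{
	struct toy_free a = { 0, 4, NULL }, c = { 12, 4, NULL }, b = { 4, 8, NULL };
	struct toy_free *head = &a;

	a.next = &c;
	head = free_insert(head, &b);	/* 0..4, 4..12 and 12..16 coalesce */
	printf("head: start %d, len %d\n", head->start, head->len);
	return 0;
}
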
468 kpp->kp_ptr = object; in __kmem_obj_info()
469 kpp->kp_slab = slab; in __kmem_obj_info()
478 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
490 if (size < PAGE_SIZE - minalign) { in __do_kmalloc_node()
497 if (is_power_of_2(size)) in __do_kmalloc_node()
498 align = max_t(unsigned int, minalign, size); in __do_kmalloc_node()
500 if (!size) in __do_kmalloc_node()
503 m = slob_alloc(size + minalign, gfp, align, node, minalign); in __do_kmalloc_node()
507 *m = size; in __do_kmalloc_node()
510 trace_kmalloc(caller, ret, size, size + minalign, gfp, node); in __do_kmalloc_node()
512 unsigned int order = get_order(size); in __do_kmalloc_node()
518 trace_kmalloc(caller, ret, size, PAGE_SIZE << order, gfp, node); in __do_kmalloc_node()
521 kmemleak_alloc(ret, size, 1, gfp); in __do_kmalloc_node()
525 void *__kmalloc(size_t size, gfp_t gfp) in __kmalloc() argument
527 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_); in __kmalloc()
531 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, in __kmalloc_node_track_caller() argument
534 return __do_kmalloc_node(size, gfp, node, caller); in __kmalloc_node_track_caller()
553 unsigned int *m = (unsigned int *)(block - align); in kfree()
560 -(PAGE_SIZE << order)); in kfree()
567 size_t kmalloc_size_roundup(size_t size) in kmalloc_size_roundup() argument
569 /* Short-circuit the 0 size case. */ in kmalloc_size_roundup()
570 if (unlikely(size == 0)) in kmalloc_size_roundup()
572 /* Short-circuit saturated "too-large" case. */ in kmalloc_size_roundup()
573 if (unlikely(size == SIZE_MAX)) in kmalloc_size_roundup()
576 return ALIGN(size, ARCH_KMALLOC_MINALIGN); in kmalloc_size_roundup()
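
On SLOB every kmalloc bucket is just the request rounded up to ARCH_KMALLOC_MINALIGN, so kmalloc_size_roundup() reduces to an ALIGN() plus the two short-circuits above. The same behaviour as a standalone helper (MINALIGN is an assumed 8-byte value for the sketch):

#include <stdint.h>
#include <stdio.h>

#define MINALIGN 8UL	/* stand-in for ARCH_KMALLOC_MINALIGN on a 64-bit box */

/* Round a request up to the allocation granularity, with the same 0 and
 * SIZE_MAX short-circuits as kmalloc_size_roundup(). */
static size_t toy_size_roundup(size_t size)
{
	if (size == 0)
		return 0;
	if (size == SIZE_MAX)
		return SIZE_MAX;
	return (size + MINALIGN - 1) & ~(MINALIGN - 1);
}

int main(void)
{
	printf("%zu %zu %zu\n", toy_size_roundup(1), toy_size_roundup(8),
	       toy_size_roundup(13));	/* prints 8 8 16 */
	return 0;
}
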
598 m = (unsigned int *)(block - align); in __ksize()
606 c->size += sizeof(struct slob_rcu); in __kmem_cache_create()
609 /* Actual size allocated */ in __kmem_cache_create()
610 c->size = SLOB_UNITS(c->size) * SLOB_UNIT; in __kmem_cache_create()
611 c->flags = flags; in __kmem_cache_create()
623 if (c->size < PAGE_SIZE) { in slob_alloc_node()
624 b = slob_alloc(c->size, flags, c->align, node, 0); in slob_alloc_node()
627 b = slob_new_pages(flags, get_order(c->size), node); in slob_alloc_node()
631 if (b && c->ctor) { in slob_alloc_node()
633 c->ctor(b); in slob_alloc_node()
636 kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); in slob_alloc_node()
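
slob_alloc_node() dispatches on object size (sub-page objects go through slob_alloc(), anything else gets whole pages) and then runs the cache's constructor, if any, on the fresh object. A loose userspace analogue, with malloc()/aligned_alloc() standing in for the two allocation paths and an invented toy_cache type:

#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096

/* Toy cache descriptor: object size and an optional constructor. */
struct toy_cache {
	size_t size;
	void (*ctor)(void *);
};

/* Small objects come from the fine-grained allocator; large ones go
 * straight to whole pages, rounded up to a page multiple. */
static void *toy_cache_alloc(struct toy_cache *c)
{
	void *b;

	if (c->size < TOY_PAGE_SIZE)
		b = malloc(c->size);
	else
		b = aligned_alloc(TOY_PAGE_SIZE,
				  (c->size + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1));

	if (b && c->ctor)
		c->ctor(b);	/* run the constructor on every allocation */
	return b;
}

static void init_obj(void *p)
{
	*(int *)p = 42;
}

int main(void)
{
	struct toy_cache c = { sizeof(int), init_obj };
	int *obj = toy_cache_alloc(&c);

	printf("%d\n", *obj);	/* prints 42 */
	free(obj);
	return 0;
}
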
653 void *__kmalloc_node(size_t size, gfp_t gfp, int node) in __kmalloc_node() argument
655 return __do_kmalloc_node(size, gfp, node, _RET_IP_); in __kmalloc_node()
665 static void __kmem_cache_free(void *b, int size) in __kmem_cache_free() argument
667 if (size < PAGE_SIZE) in __kmem_cache_free()
668 slob_free(b, size); in __kmem_cache_free()
670 slob_free_pages(b, get_order(size)); in __kmem_cache_free()
676 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); in kmem_rcu_free()
678 __kmem_cache_free(b, slob_rcu->size); in kmem_rcu_free()
683 kmemleak_free_recursive(b, c->flags); in kmem_cache_free()
685 if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) { in kmem_cache_free()
687 slob_rcu = b + (c->size - sizeof(struct slob_rcu)); in kmem_cache_free()
688 slob_rcu->size = c->size; in kmem_cache_free()
689 call_rcu(&slob_rcu->head, kmem_rcu_free); in kmem_cache_free()
691 __kmem_cache_free(b, c->size); in kmem_cache_free()
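
For SLAB_TYPESAFE_BY_RCU caches the object was created sizeof(struct slob_rcu) bytes larger (line 606), so kmem_cache_free() can write an rcu record into the object's tail and defer the real free; kmem_rcu_free() later recovers the object base from that record (line 676). A userspace sketch of the tail-record trick, using a plain deferred list in place of call_rcu():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Tail record written into the object itself, like struct slob_rcu.  The
 * deferred list stands in for call_rcu(); no real RCU is involved. */
struct toy_rcu {
	struct toy_rcu *next;
	int size;		/* full object size, including this record */
};

static struct toy_rcu *deferred;

/* Defer the free: stash a record in the object's last bytes and queue it. */
static void toy_free_deferred(void *obj, int size)
{
	struct toy_rcu *r = (struct toy_rcu *)((char *)obj + size - sizeof(*r));

	r->size = size;
	r->next = deferred;
	deferred = r;
}

/* Later (after the "grace period"), recover each object's base address from
 * the tail record with the same arithmetic kmem_rcu_free() uses, and free it. */
static void toy_flush_deferred(void)
{
	while (deferred) {
		struct toy_rcu *r = deferred;
		void *obj = (char *)r - (r->size - sizeof(*r));

		deferred = r->next;	/* read before freeing the object */
		free(obj);
	}
}

int main(void)
{
	int size = 64 + sizeof(struct toy_rcu);	/* cache size grown by the record */
	void *obj = malloc(size);

	memset(obj, 0, size);
	toy_free_deferred(obj, size);
	toy_flush_deferred();
	puts("deferred free completed");
	return 0;
}
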
743 .size = sizeof(struct kmem_cache),