Lines Matching full:size
158 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
162 unsigned int size, unsigned int align,
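
The two lines above are the declaration of the basic slab-cache constructor. A minimal sketch of how it is typically paired with kmem_cache_destroy(); the object type, cache name, and flag choice are illustrative, not taken from the listing:

#include <linux/errno.h>
#include <linux/slab.h>

struct my_obj {
        int id;
        char name[32];
};

static struct kmem_cache *my_cache;

static int my_cache_init(void)
{
        /* name, object size, alignment, slab flags, constructor */
        my_cache = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
                                     0, SLAB_HWCACHE_ALIGN, NULL);
        if (!my_cache)
                return -ENOMEM;
        return 0;
}

static void my_cache_exit(void)
{
        kmem_cache_destroy(my_cache);
}
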
201 * ksize - Report actual allocation size of associated object
206 * allocation size. Either use krealloc() or round up the allocation size
208 * access beyond the originally requested allocation size, UBSAN_BOUNDS
210 * originally allocated size via the __alloc_size attribute.
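
The ksize() kernel-doc above warns against writing into slack space. A hedged sketch of the recommended alternative, growing a buffer with krealloc() so the tracked allocation size stays accurate (buffer and length are made up for illustration):

#include <linux/slab.h>

static char *grow_buffer(char *buf, size_t new_len)
{
        /*
         * Writing past the originally requested length, even when ksize(buf)
         * reports a larger slab object, can trip FORTIFY_SOURCE/UBSAN_BOUNDS.
         * krealloc() resizes (or reallocates) and keeps the bounds accurate.
         */
        return krealloc(buf, new_len, GFP_KERNEL);
}
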
299 /* Maximum allocatable size */
301 /* Maximum size for which we actually use a slab cache */
315 * Page size is normally 2^12 bytes and, in this case, if we want to use a
316 * byte-sized index which can represent 2^8 entries, the size of the object
318 * If the minimum kmalloc size is less than 16, we use it as the minimum object
319 * size and give up on using a byte-sized index.
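
Spelling out the arithmetic behind the comment above (assuming the usual 4 KiB page, an illustrative value):

/* A one-byte index can distinguish 2^8 = 256 entries, so the smallest
 * object size it can cover across a whole page is 4096 / 256 = 16 bytes. */
#define BYTE_INDEX_ENTRIES      (1 << 8)                        /* 256 */
#define MIN_OBJ_FOR_BYTE_INDEX  (4096 / BYTE_INDEX_ENTRIES)     /* 16 */
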
386 * Figure out which kmalloc slab an allocation of a certain size
398 static __always_inline unsigned int __kmalloc_index(size_t size, in __kmalloc_index() argument
401 if (!size) in __kmalloc_index()
404 if (size <= KMALLOC_MIN_SIZE) in __kmalloc_index()
407 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96) in __kmalloc_index()
409 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192) in __kmalloc_index()
411 if (size <= 8) return 3; in __kmalloc_index()
412 if (size <= 16) return 4; in __kmalloc_index()
413 if (size <= 32) return 5; in __kmalloc_index()
414 if (size <= 64) return 6; in __kmalloc_index()
415 if (size <= 128) return 7; in __kmalloc_index()
416 if (size <= 256) return 8; in __kmalloc_index()
417 if (size <= 512) return 9; in __kmalloc_index()
418 if (size <= 1024) return 10; in __kmalloc_index()
419 if (size <= 2 * 1024) return 11; in __kmalloc_index()
420 if (size <= 4 * 1024) return 12; in __kmalloc_index()
421 if (size <= 8 * 1024) return 13; in __kmalloc_index()
422 if (size <= 16 * 1024) return 14; in __kmalloc_index()
423 if (size <= 32 * 1024) return 15; in __kmalloc_index()
424 if (size <= 64 * 1024) return 16; in __kmalloc_index()
425 if (size <= 128 * 1024) return 17; in __kmalloc_index()
426 if (size <= 256 * 1024) return 18; in __kmalloc_index()
427 if (size <= 512 * 1024) return 19; in __kmalloc_index()
428 if (size <= 1024 * 1024) return 20; in __kmalloc_index()
429 if (size <= 2 * 1024 * 1024) return 21; in __kmalloc_index()
432 BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()"); in __kmalloc_index()
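
To make the ladder above concrete with one made-up request: a compile-time-constant 100-byte allocation falls into the "size <= 128" branch, index 7, so it would be served from the 128-byte kmalloc cache.

#include <linux/slab.h>

static void *index_example(void)
{
        /* 100 is constant, so kmalloc() resolves to the 128-byte bucket
         * (index 7 in __kmalloc_index() above) at compile time. */
        return kmalloc(100, GFP_KERNEL);
}
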
443 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
456 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
457 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
463 static __always_inline void kfree_bulk(size_t size, void **p) in kfree_bulk() argument
465 kmem_cache_free_bulk(NULL, size, p); in kfree_bulk()
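
A hedged sketch of the bulk API declared above: several objects are allocated from one cache in a single call and released the same way (the cache pointer and batch size are illustrative):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int bulk_example(struct kmem_cache *s)
{
        void *objs[16];
        int got;

        /* Returns how many objects were actually allocated, 0 on failure. */
        got = kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs);
        if (!got)
                return -ENOMEM;

        /* ... use objs[0..got-1] ... */

        kmem_cache_free_bulk(s, got, objs);
        return 0;
}
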
468 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
473 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
477 int node, size_t size) __assume_kmalloc_alignment
479 void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
482 void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
487 * @size: how many bytes of memory are required.
491 * for objects smaller than page size in the kernel.
494 * bytes. For @size of power of two bytes, the alignment is also guaranteed
495 * to be at least the size.
539 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags) in kmalloc() argument
541 if (__builtin_constant_p(size)) { in kmalloc()
545 if (size > KMALLOC_MAX_CACHE_SIZE) in kmalloc()
546 return kmalloc_large(size, flags); in kmalloc()
548 index = kmalloc_index(size); in kmalloc()
555 flags, size); in kmalloc()
558 return __kmalloc(size, flags); in kmalloc()
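
A minimal usage sketch for the kmalloc() fast path shown above; the structure and flag choice are illustrative. Because sizeof(*r) is a compile-time constant, the inline code can pick the bucket index directly rather than calling __kmalloc():

#include <linux/slab.h>
#include <linux/types.h>

struct report {
        u64 stamp;
        u32 len;
        u8  data[64];
};

static struct report *make_report(void)
{
        struct report *r = kmalloc(sizeof(*r), GFP_KERNEL);

        if (!r)
                return NULL;
        r->len = 0;
        return r;
}
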
562 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node) in kmalloc_node() argument
564 if (__builtin_constant_p(size)) { in kmalloc_node()
567 if (size > KMALLOC_MAX_CACHE_SIZE) in kmalloc_node()
568 return kmalloc_large_node(size, flags, node); in kmalloc_node()
570 index = kmalloc_index(size); in kmalloc_node()
577 flags, node, size); in kmalloc_node()
579 return __kmalloc_node(size, flags, node); in kmalloc_node()
582 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node) in kmalloc_node() argument
584 if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE) in kmalloc_node()
585 return kmalloc_large_node(size, flags, node); in kmalloc_node()
587 return __kmalloc_node(size, flags, node); in kmalloc_node()
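
A hedged sketch of the node-aware variant above: allocate close to a NUMA node the caller already knows about (the helper and its arguments are invented for illustration):

#include <linux/slab.h>

static void *per_node_buffer(size_t len, int node)
{
        /* With node == NUMA_NO_NODE this behaves like plain kmalloc(). */
        return kmalloc_node(len, GFP_KERNEL, node);
}
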
594 * @size: element size.
597 static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags) in kmalloc_array() argument
601 if (unlikely(check_mul_overflow(n, size, &bytes))) in kmalloc_array()
603 if (__builtin_constant_p(n) && __builtin_constant_p(size)) in kmalloc_array()
612 * @new_size: new size of a single member of the array
631 * @size: element size.
634 static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags) in kcalloc() argument
636 return kmalloc_array(n, size, flags | __GFP_ZERO); in kcalloc()
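
A small sketch of the overflow-checked array helpers above; the element type and count are illustrative. kcalloc() is the zeroing form, kmalloc_array() the non-zeroing one:

#include <linux/slab.h>
#include <linux/types.h>

static u32 *make_table(size_t nr)
{
        /* nr * sizeof(u32) is checked for overflow; NULL if it would wrap. */
        return kcalloc(nr, sizeof(u32), GFP_KERNEL);
}
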
639 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
641 #define kmalloc_node_track_caller(size, flags, node) \ argument
642 __kmalloc_node_track_caller(size, flags, node, \
653 #define kmalloc_track_caller(size, flags) \ argument
654 __kmalloc_node_track_caller(size, flags, \
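
The *_track_caller variants above record the address of their caller in the allocation metadata, so slab debugging points at the real user instead of a generic wrapper. A hedged sketch of such a wrapper (invented for illustration, modelled on the kstrdup() pattern):

#include <linux/slab.h>
#include <linux/string.h>

static char *copy_name(const char *src, gfp_t gfp)
{
        size_t len = strlen(src) + 1;
        char *p = kmalloc_track_caller(len, gfp);

        if (p)
                memcpy(p, src, len);
        return p;
}
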
657 static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, in kmalloc_array_node() argument
662 if (unlikely(check_mul_overflow(n, size, &bytes))) in kmalloc_array_node()
664 if (__builtin_constant_p(n) && __builtin_constant_p(size)) in kmalloc_array_node()
669 static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) in kcalloc_node() argument
671 return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); in kcalloc_node()
684 * @size: how many bytes of memory are required.
687 static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags) in kzalloc() argument
689 return kmalloc(size, flags | __GFP_ZERO); in kzalloc()
694 * @size: how many bytes of memory are required.
698 static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node) in kzalloc_node() argument
700 return kmalloc_node(size, flags | __GFP_ZERO, node); in kzalloc_node()
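
As the inlines above show, kzalloc()/kzalloc_node() are just the kmalloc() variants with __GFP_ZERO OR-ed in; a typical use is handing back a zero-initialised structure (the struct is illustrative):

#include <linux/slab.h>

struct ctx {
        unsigned long flags;
        int users;
        void *priv;
};

static struct ctx *ctx_alloc(void)
{
        /* Every field starts at zero; no separate memset() is needed. */
        return kzalloc(sizeof(struct ctx), GFP_KERNEL);
}
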
703 extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
704 static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags) in kvmalloc() argument
706 return kvmalloc_node(size, flags, NUMA_NO_NODE); in kvmalloc()
708 static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node) in kvzalloc_node() argument
710 return kvmalloc_node(size, flags | __GFP_ZERO, node); in kvzalloc_node()
712 static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags) in kvzalloc() argument
714 return kvmalloc(size, flags | __GFP_ZERO); in kvzalloc()
717 static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags) in kvmalloc_array() argument
721 if (unlikely(check_mul_overflow(n, size, &bytes))) in kvmalloc_array()
727 static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags) in kvcalloc() argument
729 return kvmalloc_array(n, size, flags | __GFP_ZERO); in kvcalloc()
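
The kv*() helpers above attempt a kmalloc() allocation and can fall back to vmalloc() for large requests, so the result must be released with kvfree(). A hedged sketch (the table geometry is illustrative):

#include <linux/slab.h>

static void *big_table(size_t nr, size_t elem)
{
        /* Overflow-checked like kcalloc(), but possibly vmalloc()-backed. */
        return kvcalloc(nr, elem, GFP_KERNEL);
}

static void big_table_free(void *p)
{
        kvfree(p);      /* correct for either backing */
}
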
740 * kmalloc_size_roundup - Report allocation bucket size for the given size
742 * @size: Number of bytes to round up from.
745 * allocation of @size bytes. For example, a 126 byte request would be
750 * Use this to kmalloc() the full bucket size ahead of time instead of using
751 * ksize() to query the size after an allocation.
753 size_t kmalloc_size_roundup(size_t size);
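
Following the kernel-doc above, a sketch of sizing a buffer to the full bucket up front instead of querying ksize() after the fact (the requested length is illustrative; a 126-byte request rounds up to 128):

#include <linux/slab.h>

static void *alloc_full_bucket(size_t want, size_t *got)
{
        size_t bucket = kmalloc_size_roundup(want);
        void *p = kmalloc(bucket, GFP_KERNEL);

        if (p)
                *got = bucket;
        return p;
}
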