Lines Matching full:size

147 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
151 unsigned int size, unsigned int align,
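
For context, a minimal sketch of how a dedicated cache created with kmem_cache_create() is typically used; the object type, names and flags here are hypothetical, only the slab calls come from this header:

#include <linux/slab.h>

struct foo_req {				/* hypothetical object type */
	int id;
	void *data;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	/* one cache per object type; size and alignment come from the struct */
	foo_cache = kmem_cache_create("foo_req", sizeof(struct foo_req),
				      __alignof__(struct foo_req),
				      SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo_req *foo_req_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(foo_cache, gfp);	/* object from the dedicated cache */
}

static void foo_req_free(struct foo_req *req)
{
	kmem_cache_free(foo_cache, req);
}
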
239 * The largest kmalloc size supported by the SLAB allocators is
280 /* Maximum allocatable size */
282 /* Maximum size for which we actually use a slab cache */
296 * Page size is normally 2^12 bytes and, in this case, if we want to use
297 * byte sized index which can represent 2^8 entries, the size of the object
299 * If the minimum kmalloc size is less than 16, we use it as the minimum object
300 * size and give up on using a byte sized index.
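
In other words: with a 2^12 byte page and a byte sized freelist index that can address 2^8 entries, each object must occupy at least 2^12 / 2^8 = 2^4 = 16 bytes; an architecture whose KMALLOC_MIN_SIZE is below 16 therefore keeps its smaller minimum and gives up the byte sized index.
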
367 * Figure out which kmalloc slab an allocation of a certain size
379 static __always_inline unsigned int __kmalloc_index(size_t size, in __kmalloc_index() argument
382 if (!size) in __kmalloc_index()
385 if (size <= KMALLOC_MIN_SIZE) in __kmalloc_index()
388 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96) in __kmalloc_index()
390 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192) in __kmalloc_index()
392 if (size <= 8) return 3; in __kmalloc_index()
393 if (size <= 16) return 4; in __kmalloc_index()
394 if (size <= 32) return 5; in __kmalloc_index()
395 if (size <= 64) return 6; in __kmalloc_index()
396 if (size <= 128) return 7; in __kmalloc_index()
397 if (size <= 256) return 8; in __kmalloc_index()
398 if (size <= 512) return 9; in __kmalloc_index()
399 if (size <= 1024) return 10; in __kmalloc_index()
400 if (size <= 2 * 1024) return 11; in __kmalloc_index()
401 if (size <= 4 * 1024) return 12; in __kmalloc_index()
402 if (size <= 8 * 1024) return 13; in __kmalloc_index()
403 if (size <= 16 * 1024) return 14; in __kmalloc_index()
404 if (size <= 32 * 1024) return 15; in __kmalloc_index()
405 if (size <= 64 * 1024) return 16; in __kmalloc_index()
406 if (size <= 128 * 1024) return 17; in __kmalloc_index()
407 if (size <= 256 * 1024) return 18; in __kmalloc_index()
408 if (size <= 512 * 1024) return 19; in __kmalloc_index()
409 if (size <= 1024 * 1024) return 20; in __kmalloc_index()
410 if (size <= 2 * 1024 * 1024) return 21; in __kmalloc_index()
411 if (size <= 4 * 1024 * 1024) return 22; in __kmalloc_index()
412 if (size <= 8 * 1024 * 1024) return 23; in __kmalloc_index()
413 if (size <= 16 * 1024 * 1024) return 24; in __kmalloc_index()
414 if (size <= 32 * 1024 * 1024) return 25; in __kmalloc_index()
418 BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()"); in __kmalloc_index()
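
A few concrete mappings implied by the ladder above; index i normally means the object comes from the kmalloc-2^i cache, with 96 and 192 bytes as the two odd sized exceptions:

/*
 * Illustrative only, assuming the usual cache naming:
 *
 *	kmalloc_index(8)   == 3  ->  kmalloc-8    (when KMALLOC_MIN_SIZE is 8)
 *	kmalloc_index(96)  == 1  ->  kmalloc-96   (when KMALLOC_MIN_SIZE <= 32)
 *	kmalloc_index(100) == 7  ->  kmalloc-128  (sizes round up to the next bucket)
 *	kmalloc_index(200) == 8  ->  kmalloc-256  (the 192 byte cache only covers <= 192)
 */
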
428 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
446 static __always_inline void kfree_bulk(size_t size, void **p) in kfree_bulk() argument
448 kmem_cache_free_bulk(NULL, size, p); in kfree_bulk()
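
kfree_bulk() hands a whole array of kmalloc'd pointers back in one call; a rough sketch of the pattern, with the object size and count made up:

static void fill_and_release(void)
{
	void *objs[16];
	size_t i, n = 0;

	for (i = 0; i < ARRAY_SIZE(objs); i++) {
		void *p = kmalloc(64, GFP_KERNEL);

		if (!p)
			break;
		objs[n++] = p;
	}

	/* ... use the objects ... */

	kfree_bulk(n, objs);		/* one call instead of n kfree() calls */
}
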
452 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
455 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) in __kmalloc_node() argument
457 return __kmalloc(size, flags); in __kmalloc_node()
472 int node, size_t size) __assume_slab_alignment __malloc;
477 int node, size_t size) in kmem_cache_alloc_node_trace() argument
479 return kmem_cache_alloc_trace(s, gfpflags, size); in kmem_cache_alloc_node_trace()
485 gfp_t flags, size_t size) in kmem_cache_alloc_trace() argument
489 ret = kasan_kmalloc(s, ret, size, flags); in kmem_cache_alloc_trace()
496 int node, size_t size) in kmem_cache_alloc_node_trace() argument
500 ret = kasan_kmalloc(s, ret, size, gfpflags); in kmem_cache_alloc_node_trace()
505 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __…
508 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignm…
511 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) in kmalloc_order_trace() argument
513 return kmalloc_order(size, flags, order); in kmalloc_order_trace()
517 static __always_inline void *kmalloc_large(size_t size, gfp_t flags) in kmalloc_large() argument
519 unsigned int order = get_order(size); in kmalloc_large()
520 return kmalloc_order_trace(size, flags, order); in kmalloc_large()
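
Worked example of the large path, assuming SLUB (where KMALLOC_MAX_CACHE_SIZE is two pages) and 4 KiB pages: a 3 MiB request bypasses the slab caches, kmalloc_large() computes get_order(3 << 20) == 10, and kmalloc_order() returns 2^10 pages (4 MiB) from the page allocator, so roughly 1 MiB is lost to the power-of-two rounding.
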
525 * @size: how many bytes of memory are required.
529 * for objects smaller than page size in the kernel.
532 * bytes. For @size of power of two bytes, the alignment is also guaranteed
533 * to be at least the size.
577 static __always_inline void *kmalloc(size_t size, gfp_t flags) in kmalloc() argument
579 if (__builtin_constant_p(size)) { in kmalloc()
583 if (size > KMALLOC_MAX_CACHE_SIZE) in kmalloc()
584 return kmalloc_large(size, flags); in kmalloc()
586 index = kmalloc_index(size); in kmalloc()
593 flags, size); in kmalloc()
596 return __kmalloc(size, flags); in kmalloc()
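
Typical use, as a sketch with a hypothetical structure; because sizeof(*f) is a compile-time constant, the branch above resolves the bucket via kmalloc_index() at build time, while a runtime-sized request would take the __kmalloc() path:

struct foo {				/* hypothetical */
	int id;
	char name[32];
};

static struct foo *foo_create(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	f->id = 0;
	return f;			/* caller releases with kfree(f) */
}
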
599 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) in kmalloc_node() argument
602 if (__builtin_constant_p(size) && in kmalloc_node()
603 size <= KMALLOC_MAX_CACHE_SIZE) { in kmalloc_node()
604 unsigned int i = kmalloc_index(size); in kmalloc_node()
611 flags, node, size); in kmalloc_node()
614 return __kmalloc_node(size, flags, node); in kmalloc_node()
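
The node-aware variant is typically fed a node derived from the device or CPU that will mostly touch the memory; a sketch with a hypothetical descriptor ring:

/* place a (hypothetical) descriptor ring on the device's local NUMA node */
static void *ring_alloc(struct device *dev, size_t ring_bytes)
{
	return kmalloc_node(ring_bytes, GFP_KERNEL, dev_to_node(dev));
}
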
620 * @size: element size.
623 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) in kmalloc_array() argument
627 if (unlikely(check_mul_overflow(n, size, &bytes))) in kmalloc_array()
629 if (__builtin_constant_p(n) && __builtin_constant_p(size)) in kmalloc_array()
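
The point of kmalloc_array() is the check_mul_overflow() guard; contrasted with an open-coded multiply (element type and count are hypothetical):

struct entry *tbl;			/* hypothetical element type */

/* open-coded: n * sizeof(*tbl) can wrap and silently under-allocate */
tbl = kmalloc(n * sizeof(*tbl), GFP_KERNEL);

/* checked: returns NULL instead if the product overflows size_t */
tbl = kmalloc_array(n, sizeof(*tbl), GFP_KERNEL);
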
638 * @new_size: new size of a single member of the array
655 * @size: element size.
658 static inline void *kcalloc(size_t n, size_t size, gfp_t flags) in kcalloc() argument
660 return kmalloc_array(n, size, flags | __GFP_ZERO); in kcalloc()
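
kcalloc() is the same checked multiply plus __GFP_ZERO; a one-line sketch (the counter array is made up):

u64 *counters = kcalloc(nr_cpu_ids, sizeof(*counters), GFP_KERNEL);
/* NULL on overflow or allocation failure; every element starts at zero */
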
672 #define kmalloc_track_caller(size, flags) \ argument
673 __kmalloc_track_caller(size, flags, _RET_IP_)
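
kmalloc_track_caller() exists for small allocation wrappers: by passing _RET_IP_, slab debugging and leak tracking attribute the memory to the wrapper's caller rather than to the wrapper itself. A hypothetical duplication helper as a sketch:

static void *my_memdup(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc_track_caller(len, gfp);	/* accounted to my_memdup()'s caller */

	if (p)
		memcpy(p, src, len);
	return p;
}
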
675 static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, in kmalloc_array_node() argument
680 if (unlikely(check_mul_overflow(n, size, &bytes))) in kmalloc_array_node()
682 if (__builtin_constant_p(n) && __builtin_constant_p(size)) in kmalloc_array_node()
687 static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) in kcalloc_node() argument
689 return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); in kcalloc_node()
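
kcalloc_node() combines the checked multiply, __GFP_ZERO and node placement; a one-line sketch with a made-up bucket table:

struct bucket *b = kcalloc_node(nbuckets, sizeof(*b), GFP_KERNEL, node);
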
695 #define kmalloc_node_track_caller(size, flags, node) \ argument
696 __kmalloc_node_track_caller(size, flags, node, \
701 #define kmalloc_node_track_caller(size, flags, node) \ argument
702 kmalloc_track_caller(size, flags)
716 * @size: how many bytes of memory are required.
719 static inline void *kzalloc(size_t size, gfp_t flags) in kzalloc() argument
721 return kmalloc(size, flags | __GFP_ZERO); in kzalloc()
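
kzalloc() is the usual way to get a zero-initialised private structure; a sketch with a hypothetical type:

struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

if (!priv)
	return -ENOMEM;
/* all fields, including pointers, start out zero/NULL */
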
726 * @size: how many bytes of memory are required.
730 static inline void *kzalloc_node(size_t size, gfp_t flags, int node) in kzalloc_node() argument
732 return kmalloc_node(size, flags | __GFP_ZERO, node); in kzalloc_node()
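
And the node-aware form is commonly used to set up one zeroed control structure per online NUMA node; a sketch with hypothetical per-node state:

static struct foo_node_state *foo_states[MAX_NUMNODES];	/* hypothetical */

static int foo_states_init(void)
{
	int node;

	for_each_online_node(node) {
		foo_states[node] = kzalloc_node(sizeof(*foo_states[node]),
						GFP_KERNEL, node);
		if (!foo_states[node])
			return -ENOMEM;
	}
	return 0;
}
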