Lines matching +full:cache +full:-size in include/linux/slab_def.h
1 /* SPDX-License-Identifier: GPL-2.0 */
14 /* 1) Cache tunables. Protected by slab_mutex */
19 unsigned int size;
33 size_t colour; /* cache colouring range */
41 /* 4) cache creation/removal */
67 * fields and/or padding to every object. 'size' contains the total
68 * object size including these internal fields, while 'obj_offset'
69 * and 'object_size' contain the offset to the user object and its
70 * size.
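The layout that comment describes, roughly (the exact debug fields depend on
options such as SLAB_RED_ZONE and SLAB_STORE_USER; this sketch is illustrative
only):

    |<--------------------------- size --------------------------->|
    | debug header | user object ('object_size')        | debug pad |
    ^              ^
    raw object     'obj_offset'

A pointer handed out to callers maps back to the raw object by subtracting
obj_offset.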
84 unsigned int usersize; /* Usercopy region size */
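usersize (together with useroffset, whose line did not match) is set by
kmem_cache_create_usercopy(), which whitelists a single region of each object
for copy_to_user()/copy_from_user(). A hedged sketch (struct foo and the flag
choice are made up for illustration):

    /* Only buf may cross the user/kernel boundary; hardened usercopy
     * rejects copies outside [useroffset, useroffset + usersize). */
    struct foo {
        spinlock_t lock;
        char buf[64];        /* the only part exposed to user space */
    };

    struct kmem_cache *cache =
        kmem_cache_create_usercopy("foo_cache", sizeof(struct foo),
                                   0, SLAB_HWCACHE_ALIGN,
                                   offsetof(struct foo, buf),
                                   sizeof_field(struct foo, buf),
                                   NULL);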
89 static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
92 void *object = x - (x - page->s_mem) % cache->size;
93 void *last_object = page->s_mem + (cache->num - 1) * cache->size;
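Line 92's remainder trick rounds an interior pointer down to the start of the
object containing it; the rest of the function (its lines did not match the
query) clamps the result to last_object. A standalone userspace sketch, with
made-up values standing in for s_mem, size and num:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t s_mem = 0x1000;    /* first object in the slab page */
        unsigned int size = 256;     /* object size incl. debug fields */
        unsigned int num = 16;       /* objects in this slab */

        uintptr_t x = 0x1234;        /* points into the middle of an object */
        uintptr_t object = x - (x - s_mem) % size;
        uintptr_t last_object = s_mem + (uintptr_t)(num - 1) * size;

        if (object > last_object)    /* the clamp nearest_obj() applies */
            object = last_object;

        /* Prints: 0x1234 -> object 0x1200 (index 2) */
        printf("0x%jx -> object 0x%jx (index %ju)\n",
               (uintmax_t)x, (uintmax_t)object,
               (uintmax_t)((object - s_mem) / size));
        return 0;
    }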
102 * We want to avoid an expensive divide : (offset / cache->size)
103 * Using the fact that size is a constant for a particular cache,
104 * we can replace (offset / cache->size) by
105 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
107 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
110 u32 offset = (obj - page->s_mem);
111 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
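reciprocal_divide() turns the divide into one multiply and two shifts, using a
value precomputed when the cache is created. A self-contained userspace sketch
of the same scheme, modelled on the kernel's lib/reciprocal_div.c (the struct
layout and helper names here are simplified stand-ins, not the kernel code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct reciprocal_value { uint32_t m; uint8_t sh1, sh2; };

    static int fls32(uint32_t x)      /* highest set bit, 1-based */
    {
        int r = 0;
        while (x) { x >>= 1; r++; }
        return r;
    }

    /* Precomputed once per cache, as reciprocal_value(size) would be. */
    static struct reciprocal_value make_reciprocal(uint32_t d)
    {
        struct reciprocal_value R;
        int l = fls32(d - 1);         /* l = ceil(log2(d)) */
        uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

        R.m = (uint32_t)m;
        R.sh1 = l > 1 ? 1 : l;        /* min(l, 1) */
        R.sh2 = l > 0 ? l - 1 : 0;    /* max(l - 1, 0) */
        return R;
    }

    static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
    {
        uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);
        return (t + ((a - t) >> R.sh1)) >> R.sh2;
    }

    int main(void)
    {
        uint32_t size = 192;          /* e.g. objects in a kmalloc-192 cache */
        struct reciprocal_value R = make_reciprocal(size);

        for (uint32_t off = 0; off < 10 * size; off += 37)
            assert(reciprocal_divide(off, R) == off / size);

        /* Prints: offset 977 -> index 5 */
        printf("offset 977 -> index %u\n", reciprocal_divide(977, R));
        return 0;
    }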
114 static inline int objs_per_slab_page(const struct kmem_cache *cache,
117 return cache->num;
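obj_to_index() and its inverse (mm/slab.c carries an index_to_obj() helper
that is essentially s_mem + index * size) round-trip across the cache->num
slots of a slab. A toy check reusing the made-up geometry from the sketch
above:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t s_mem = 0x1000;
        unsigned int size = 256, num = 16;

        for (unsigned int i = 0; i < num; i++) {
            uintptr_t obj = s_mem + (uintptr_t)i * size;  /* index_to_obj */
            assert((obj - s_mem) / size == i);            /* obj_to_index */
        }
        return 0;
    }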