Lines Matching +full:cache +full:- +full:size
1 /* SPDX-License-Identifier: GPL-2.0 */
15 /* 1) Cache tunables. Protected by slab_mutex */
20 unsigned int size;
34 size_t colour; /* cache colouring range */
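
The colouring range above staggers where the first object of each successive slab begins, so that equal-index objects in different slabs land on different cache lines. A minimal userspace sketch of the cycling, with made-up colour/colour_off values (in the kernel, colour_off is typically the L1 cache line size):

#include <stdio.h>

int main(void)
{
	size_t colour = 4;        /* number of distinct colours (the range) */
	size_t colour_off = 64;   /* one cache line per colour step         */
	size_t colour_next = 0;

	for (int slab = 0; slab < 6; slab++) {
		size_t offset = colour_next * colour_off;

		printf("slab %d: first object at base + %zu\n", slab, offset);
		if (++colour_next >= colour)   /* cycle 0,1,2,3,0,1,... */
			colour_next = 0;
	}
	return 0;
}
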
42 /* 4) cache creation/removal */
68 * fields and/or padding to every object. 'size' contains the total
69 * object size including these internal fields, while 'obj_offset'
70 * and 'object_size' contain the offset to the user object and its
71 * size.
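
A small sketch of that layout, assuming hypothetical red-zone padding of 8 bytes on each side of a 64-byte user object (the real debug fields and their sizes differ):

#include <stdio.h>

struct fake_cache {
	unsigned int size;        /* total per-object footprint         */
	unsigned int obj_offset;  /* where the user-visible data starts */
	unsigned int object_size; /* user-visible size                  */
};

int main(void)
{
	/* made-up numbers: 64-byte object wrapped by two 8-byte red zones */
	struct fake_cache c = { .size = 80, .obj_offset = 8, .object_size = 64 };

	printf("padding before: %u, user bytes: %u, padding after: %u\n",
	       c.obj_offset, c.object_size,
	       c.size - c.obj_offset - c.object_size);
	return 0;
}
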
85 unsigned int usersize; /* Usercopy region size */
90 static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
91 void *x)
93 void *object = x - (x - page->s_mem) % cache->size;
94 void *last_object = page->s_mem + (cache->num - 1) * cache->size;
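
The modulo round-down on line 93 can be demonstrated in plain userspace C; here a local slab buffer stands in for page->s_mem, and the sizes are made up:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	char slab[320];              /* pretend slab memory           */
	size_t size = 64, num = 5;   /* 5 objects of 64 bytes each    */
	char *x = slab + 130;        /* pointer into the 3rd object   */

	/* same arithmetic as nearest_obj(): round down to object start */
	char *object = x - (x - slab) % size;
	char *last_object = slab + (num - 1) * size;

	assert(object == slab + 128);
	assert(object <= last_object);
	return 0;
}
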
103 * We want to avoid an expensive divide : (offset / cache->size)
104 * Using the fact that size is a constant for a particular cache,
105 * we can replace (offset / cache->size) by
106 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
108 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
109 const struct page *page, void *obj)
111 u32 offset = (obj - page->s_mem);
112 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
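
A minimal userspace sketch of the trick the comment describes, assuming a made-up object size of 72 bytes: precompute a scaled reciprocal once per cache, then turn each divide into a multiply and shift. The kernel's reciprocal_divide() uses a precomputed reciprocal_value in the same spirit:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t size = 72;                      /* hypothetical object size */
	/* precompute once per cache: ceil(2^32 / size) */
	uint64_t recip = (((uint64_t)1 << 32) + size - 1) / size;

	for (uint32_t offset = 0; offset < 1 << 20; offset++) {
		/* multiply + shift in place of a hardware divide */
		uint32_t index = (uint32_t)((offset * recip) >> 32);

		assert(index == offset / size);  /* exact for small offsets */
	}
	return 0;
}
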
115 static inline int objs_per_slab_page(const struct kmem_cache *cache,
116 const struct page *page)
120 return cache->num;