/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
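
/*
 * With CONFIG_SLUB_STATS these counters are kept per cpu in
 * struct kmem_cache_cpu below and exported through sysfs as one file
 * per item under /sys/kernel/slab/<cache>/.
 */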

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
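
/*
 * The lockless fastpaths in mm/slub.c consume ->freelist and ->tid as a
 * pair and commit with a cmpxchg-double; the transaction id is advanced
 * on every operation so that a stale freelist snapshot is detected and
 * the operation retried.  Roughly (a sketch, not the verbatim fastpath):
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	next_object = <free pointer stored in object>;
 *	this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				object, tid,
 *				next_object, next_tid(tid));
 */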

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)     READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
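
/*
 * slub_percpu_partial_read_once() is for contexts that peek at another
 * CPU's ->partial list (e.g. when partial slabs are summed up for
 * sysfs), where the list may change under the reader.
 * slub_set_percpu_partial() pops the current head: given head page @p
 * it makes p->next the new list, as in this sketch of the drain loop
 * in mm/slub.c:
 *
 *	while ((page = slub_percpu_partial(c))) {
 *		slub_set_percpu_partial(c, page);
 *		... unfreeze or discard @page ...
 *	}
 */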

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
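
/*
 * The encoding of @x is private to mm/slub.c (OO_SHIFT/OO_MASK there):
 * the allocation order lives in the high bits and the object count in
 * the low bits, so both values are published with a single word-sized
 * store and unpacked again with oo_order()/oo_objects().
 */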

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
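
/*
 * Like the per-cpu ->partial accessors above, these collapse to
 * constants when CONFIG_SLUB_CPU_PARTIAL is disabled, so mm/slub.c can
 * use slub_cpu_partial(s) unconditionally and simply sees 0 objects to
 * keep around in per-cpu partial slabs.
 */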

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
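
/*
 * nearest_obj() maps an arbitrary address inside a slab page to the
 * object containing it: it rounds down to an object boundary, clamps
 * addresses beyond the last object back to the last object, and then
 * skips the left red zone via fixup_red_left() so the result points at
 * the object itself.  Callers such as KASAN use it to report which
 * object a bad address belongs to.
 */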

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}
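
/*
 * The division by cache->size in __obj_to_index() is done with
 * reciprocal_divide() against the precomputed cache->reciprocal_size
 * (see reciprocal_value() in <linux/reciprocal_div.h>), turning a
 * division on hot paths into a multiply and shift.  kasan_reset_tag()
 * strips any pointer tag bits first so the offset is computed from the
 * untagged address.
 */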

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	return __obj_to_index(cache, page_address(page), obj);
}

static inline int objs_per_slab_page(const struct kmem_cache *cache,
				     const struct page *page)
{
	return page->objects;
}
#endif /* _LINUX_SLUB_DEF_H */