// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int test_verbose;

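/*
 * Userspace stand-in for a kernel kmem_cache.  Objects come from
 * malloc()/posix_memalign(); freed objects may be kept on a small
 * per-cache free list (objs/nr_objs) for later reuse.
 */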
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
};

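/*
 * non_kernel is a budget of allocations that may succeed even without
 * __GFP_DIRECT_RECLAIM; tests raise it to let a limited number of such
 * "atomic" allocations through.
 */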
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

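/*
 * Bookkeeping accessors: bytes currently outstanding, live object count,
 * and the running total of allocations since the counter was last zeroed.
 */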
unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

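/*
 * Allocate one object.  Allocations without __GFP_DIRECT_RECLAIM only
 * succeed while the non_kernel budget lasts.  Objects are taken from the
 * per-cache free list when possible, otherwise from malloc() or
 * posix_memalign(); the constructor, if any, takes precedence over
 * __GFP_ZERO.
 */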
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

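/*
 * Free one object; the caller must hold cachep->lock.  Aligned caches and
 * caches already holding more than ten objects release the memory
 * immediately (after poisoning it); otherwise the object is pushed onto
 * the free list, linked through node->parent.
 */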
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

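/* Free a batch of objects under a single lock acquisition. */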
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

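/* Nothing to shrink in the userspace emulation. */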
void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

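/*
 * Allocate a batch of objects.  Without __GFP_DIRECT_RECLAIM the whole
 * batch must fit within the non_kernel budget.  The free list is used only
 * when it can satisfy the entire request; otherwise every object is
 * allocated individually.
 */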
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (cachep->non_kernel < size)
			return 0;

		cachep->non_kernel -= size;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size);
			} else {
				p[i] = malloc(cachep->size);
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

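/*
 * Create a cache.  The name and flags are accepted for API compatibility
 * with the kernel but are otherwise unused here.
 */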
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators with an unaligned kmem_cache so that the
	 * bulk alloc/free paths reuse the cached free list.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}