// SPDX-License-Identifier: GPL-2.0
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because folio_test_slab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */
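
/*
 * Worked example of the scheme above (illustrative only, assuming 4 KiB
 * pages and an 8-byte ARCH_KMALLOC_MINALIGN):
 *
 *      kmalloc(100, GFP_KERNEL)
 *              carves 100 + 8 bytes (payload plus size header) out of a
 *              page on the "small" list (< 256 bytes) and returns a
 *              pointer just past the header, which records the size 100.
 *      kmalloc(300, GFP_KERNEL)
 *              same scheme, but the 308-byte request is served from the
 *              "medium" list (< 1024 bytes).
 *      kmalloc(8192, GFP_KERNEL)
 *              is PAGE_SIZE or larger, so an order-1 compound page comes
 *              straight from alloc_pages(); kfree() recognises this case
 *              because folio_test_slab() is false for it.
 */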

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
        slobidx_t units;
};
typedef struct slob_block slob_t;
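
/*
 * Worked example of this encoding (illustrative only, assuming a 4 KiB
 * page, i.e. slobidx_t is s16 and SLOB_UNIT is 2 bytes):
 *
 *      A free block of 3 units whose successor starts 40 units from the
 *      beginning of the page is stored as
 *
 *              s[0].units =  3         (its size in SLOB_UNITs)
 *              s[1].units = 40         (offset of the next free block)
 *
 *      A free block of exactly 1 unit has room only for the link, so it
 *      stores the negated offset instead:
 *
 *              s[0].units = -40
 */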

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slab *slab)
{
        return PageSlobFree(slab_page(slab));
}

static void set_slob_page_free(struct slab *slab, struct list_head *list)
{
        list_add(&slab->slab_list, list);
        __SetPageSlobFree(slab_page(slab));
}

static inline void clear_slob_page_free(struct slab *slab)
{
        list_del(&slab->slab_list);
        __ClearPageSlobFree(slab_page(slab));
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
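
/*
 * Example (illustrative only): with 4 KiB pages, slobidx_t is s16, so
 * SLOB_UNIT is sizeof(slob_t) == 2 bytes and
 *
 *      SLOB_UNITS(1)   == 1
 *      SLOB_UNITS(100) == 50
 *      SLOB_UNITS(101) == 51   (sizes are rounded up to whole units)
 */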

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
        struct rcu_head head;
        int size;
};
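
/*
 * Layout of a SLAB_TYPESAFE_BY_RCU object (illustrative only): the cache's
 * c->size is grown by sizeof(struct slob_rcu) at creation time, so every
 * object carries the footer at its tail:
 *
 *      b                                   b + c->size
 *      |<------------- object ------------>|
 *      |<-- usable part --->|<- slob_rcu ->|
 *
 * kmem_cache_free() fills in the footer and hands it to call_rcu(); after
 * the grace period, kmem_rcu_free() recovers b from the footer address and
 * releases the whole block.
 */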

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t offset = next - base;

        if (size > 1) {
                s[0].units = size;
                s[1].units = offset;
        } else
                s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
        if (s->units > 0)
                return s->units;
        return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t next;

        if (s[0].units < 0)
                next = -s[0].units;
        else
                next = s[1].units;
        return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
        return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
        struct page *page;

#ifdef CONFIG_NUMA
        if (node != NUMA_NO_NODE)
                page = __alloc_pages_node(node, gfp, order);
        else
#endif
                page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
                            PAGE_SIZE << order);
        return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
        struct page *sp = virt_to_page(b);

        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;

        mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
                            -(PAGE_SIZE << order));
        __free_pages(sp, order);
}

/*
 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
 * @sp: Page to look in.
 * @size: Size of the allocation.
 * @align: Allocation alignment.
 * @align_offset: Offset in the allocated block that will be aligned.
 * @page_removed_from_list: Return parameter.
 *
 * Tries to find a chunk of memory at least @size bytes big within @sp.
 *
 * Return: Pointer to memory if allocated, %NULL otherwise. If the
 *         allocation fills up @sp then the page is removed from the
 *         freelist, in this case @page_removed_from_list will be set to
 *         true (set to false otherwise).
 */
static void *slob_page_alloc(struct slab *sp, size_t size, int align,
                             int align_offset, bool *page_removed_from_list)
{
        slob_t *prev, *cur, *aligned = NULL;
        int delta = 0, units = SLOB_UNITS(size);

        *page_removed_from_list = false;
        for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
                slobidx_t avail = slob_units(cur);

                /*
                 * 'aligned' will hold the address of the slob block so that the
                 * address 'aligned'+'align_offset' is aligned according to the
                 * 'align' parameter. This is for kmalloc() which prepends the
                 * allocated block with its size, so that the block itself is
                 * aligned when needed.
                 */
                if (align) {
                        aligned = (slob_t *)
                                (ALIGN((unsigned long)cur + align_offset, align)
                                 - align_offset);
                        delta = aligned - cur;
                }
                if (avail >= units + delta) { /* room enough? */
                        slob_t *next;

                        if (delta) { /* need to fragment head to align? */
                                next = slob_next(cur);
                                set_slob(aligned, avail - delta, next);
                                set_slob(cur, delta, aligned);
                                prev = cur;
                                cur = aligned;
                                avail = slob_units(cur);
                        }

                        next = slob_next(cur);
                        if (avail == units) { /* exact fit? unlink. */
                                if (prev)
                                        set_slob(prev, slob_units(prev), next);
                                else
                                        sp->freelist = next;
                        } else { /* fragment */
                                if (prev)
                                        set_slob(prev, slob_units(prev), cur + units);
                                else
                                        sp->freelist = cur + units;
                                set_slob(cur + units, avail - units, next);
                        }

                        sp->units -= units;
                        if (!sp->units) {
                                clear_slob_page_free(sp);
                                *page_removed_from_list = true;
                        }
                        return cur;
                }
                if (slob_last(cur))
                        return NULL;
        }
}
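
/*
 * Worked example of the alignment handling above (illustrative only,
 * assuming 2-byte SLOB_UNITs and an 8-byte ARCH_KMALLOC_MINALIGN):
 *
 *      kmalloc(64, ...) asks slob_page_alloc() for size = 72 (64 bytes
 *      plus the 8-byte size header), align = 64 and align_offset = 8.
 *      If the current free block 'cur' starts at byte offset 40 within
 *      its page, then
 *
 *              aligned = ALIGN(40 + 8, 64) - 8 = 64 - 8 = 56,
 *
 *      i.e. the block is placed so that the payload (header address + 8)
 *      falls on a 64-byte boundary; the 16-byte head fragment (delta = 8
 *      units) is split off and stays on the freelist.
 */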

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
                        int align_offset)
{
        struct folio *folio;
        struct slab *sp;
        struct list_head *slob_list;
        slob_t *b = NULL;
        unsigned long flags;
        bool _unused;

        if (size < SLOB_BREAK1)
                slob_list = &free_slob_small;
        else if (size < SLOB_BREAK2)
                slob_list = &free_slob_medium;
        else
                slob_list = &free_slob_large;

        spin_lock_irqsave(&slob_lock, flags);
        /* Iterate through each partially free page, try to find room */
        list_for_each_entry(sp, slob_list, slab_list) {
                bool page_removed_from_list = false;
#ifdef CONFIG_NUMA
                /*
                 * If there's a node specification, search for a partial
                 * page with a matching node id in the freelist.
                 */
                if (node != NUMA_NO_NODE && slab_nid(sp) != node)
                        continue;
#endif
                /* Enough room on this page? */
                if (sp->units < SLOB_UNITS(size))
                        continue;

                b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
                if (!b)
                        continue;

                /*
                 * If slob_page_alloc() removed sp from the list then we
                 * cannot call list functions on sp. If so allocation
                 * did not fragment the page anyway so optimisation is
                 * unnecessary.
                 */
                if (!page_removed_from_list) {
                        /*
                         * Improve fragment distribution and reduce our average
                         * search time by starting our next search here. (see
                         * Knuth vol 1, sec 2.5, pg 449)
                         */
                        if (!list_is_first(&sp->slab_list, slob_list))
                                list_rotate_to_front(&sp->slab_list, slob_list);
                }
                break;
        }
        spin_unlock_irqrestore(&slob_lock, flags);

        /* Not enough space: must allocate a new page */
        if (!b) {
                b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
                if (!b)
                        return NULL;
                folio = virt_to_folio(b);
                __folio_set_slab(folio);
                sp = folio_slab(folio);

                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
                sp->freelist = b;
                INIT_LIST_HEAD(&sp->slab_list);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp, slob_list);
                b = slob_page_alloc(sp, size, align, align_offset, &_unused);
                BUG_ON(!b);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
        if (unlikely(gfp & __GFP_ZERO))
                memset(b, 0, size);
        return b;
}
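
/*
 * Example of the fresh-page setup above (illustrative only, 4 KiB pages,
 * 2-byte SLOB_UNITs): a newly allocated page starts as a single free block
 * of SLOB_UNITS(PAGE_SIZE) == 2048 units whose encoded "next" offset is
 * also 2048, i.e. the first address past the page. That address is
 * page-aligned, so slob_last() reports the block as the last one, and the
 * first slob_page_alloc() call then carves the requested object out of it.
 */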

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
        struct slab *sp;
        slob_t *prev, *next, *b = (slob_t *)block;
        slobidx_t units;
        unsigned long flags;
        struct list_head *slob_list;

        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;
        BUG_ON(!size);

        sp = virt_to_slab(block);
        units = SLOB_UNITS(size);

        spin_lock_irqsave(&slob_lock, flags);

        if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
                /* Go directly to page allocator. Do not pass slob allocator */
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
                spin_unlock_irqrestore(&slob_lock, flags);
                __folio_clear_slab(slab_folio(sp));
                slob_free_pages(b, 0);
                return;
        }

        if (!slob_page_free(sp)) {
                /* This slob page is about to become partially free. Easy! */
                sp->units = units;
                sp->freelist = b;
                set_slob(b, units,
                         (void *)((unsigned long)(b +
                                        SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
                if (size < SLOB_BREAK1)
                        slob_list = &free_slob_small;
                else if (size < SLOB_BREAK2)
                        slob_list = &free_slob_medium;
                else
                        slob_list = &free_slob_large;
                set_slob_page_free(sp, slob_list);
                goto out;
        }

        /*
         * Otherwise the page is already partially free, so find reinsertion
         * point.
         */
        sp->units += units;

        if (b < (slob_t *)sp->freelist) {
                if (b + units == sp->freelist) {
                        units += slob_units(sp->freelist);
                        sp->freelist = slob_next(sp->freelist);
                }
                set_slob(b, units, sp->freelist);
                sp->freelist = b;
        } else {
                prev = sp->freelist;
                next = slob_next(prev);
                while (b > next) {
                        prev = next;
                        next = slob_next(prev);
                }

                if (!slob_last(prev) && b + units == next) {
                        units += slob_units(next);
                        set_slob(b, units, slob_next(next));
                } else
                        set_slob(b, units, next);

                if (prev + slob_units(prev) == b) {
                        units = slob_units(b) + slob_units(prev);
                        set_slob(prev, units, slob_next(b));
                } else
                        set_slob(prev, slob_units(prev), b);
        }
out:
        spin_unlock_irqrestore(&slob_lock, flags);
}
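
/*
 * Coalescing example for the reinsertion path above (illustrative only,
 * offsets in SLOB_UNITs): suppose a page's freelist holds blocks at
 * offsets [10, size 5] -> [40, size 8] and a 25-unit object at offset 15
 * is freed. The scan stops with prev at offset 10 and next at offset 40;
 * since 15 + 25 == 40, the freed block absorbs 'next' (now size 33), and
 * since 10 + 5 == 15, 'prev' then absorbs the freed block, leaving a
 * single free block at offset 10 of size 38.
 */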

#ifdef CONFIG_PRINTK
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
        kpp->kp_ptr = object;
        kpp->kp_slab = slab;
}
#endif

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
        unsigned int *m;
        unsigned int minalign;
        void *ret;

        minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
                         arch_slab_minalign());
        gfp &= gfp_allowed_mask;

        might_alloc(gfp);

        if (size < PAGE_SIZE - minalign) {
                int align = minalign;

                /*
                 * For power of two sizes, guarantee natural alignment for
                 * kmalloc()'d objects.
                 */
                if (is_power_of_2(size))
                        align = max_t(unsigned int, minalign, size);

                if (!size)
                        return ZERO_SIZE_PTR;

                m = slob_alloc(size + minalign, gfp, align, node, minalign);

                if (!m)
                        return NULL;
                *m = size;
                ret = (void *)m + minalign;

                trace_kmalloc(caller, ret, size, size + minalign, gfp, node);
        } else {
                unsigned int order = get_order(size);

                if (likely(order))
                        gfp |= __GFP_COMP;
                ret = slob_new_pages(gfp, order, node);

                trace_kmalloc(caller, ret, size, PAGE_SIZE << order, gfp, node);
        }

        kmemleak_alloc(ret, size, 1, gfp);
        return ret;
}
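
/*
 * Resulting layout for the small-size path above (illustrative only,
 * assuming minalign == 8):
 *
 *      m                        ret = m + 8
 *      |<- size header (8 B) ->|<----- size bytes for the caller ----->|
 *
 * kfree() later steps back by the same minalign to recover the header and
 * passes *m + minalign to slob_free(); __ksize() reads *m the same way.
 */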

void *__kmalloc(size_t size, gfp_t gfp)
{
        return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
                                  int node, unsigned long caller)
{
        return __do_kmalloc_node(size, gfp, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);

void kfree(const void *block)
{
        struct folio *sp;

        trace_kfree(_RET_IP_, block);

        if (unlikely(ZERO_OR_NULL_PTR(block)))
                return;
        kmemleak_free(block);

        sp = virt_to_folio(block);
        if (folio_test_slab(sp)) {
                unsigned int align = max_t(unsigned int,
                                           ARCH_KMALLOC_MINALIGN,
                                           arch_slab_minalign());
                unsigned int *m = (unsigned int *)(block - align);

                slob_free(m, *m + align);
        } else {
                unsigned int order = folio_order(sp);

                mod_node_page_state(folio_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
                                    -(PAGE_SIZE << order));
                __free_pages(folio_page(sp, 0), order);
        }
}
EXPORT_SYMBOL(kfree);

size_t kmalloc_size_roundup(size_t size)
{
        /* Short-circuit the 0 size case. */
        if (unlikely(size == 0))
                return 0;
        /* Short-circuit saturated "too-large" case. */
        if (unlikely(size == SIZE_MAX))
                return SIZE_MAX;

        return ALIGN(size, ARCH_KMALLOC_MINALIGN);
}
EXPORT_SYMBOL(kmalloc_size_roundup);
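
/*
 * Example (illustrative only, assuming ARCH_KMALLOC_MINALIGN == 8):
 *
 *      kmalloc_size_roundup(1)  == 8
 *      kmalloc_size_roundup(12) == 16
 *      kmalloc_size_roundup(64) == 64
 *
 * i.e. SLOB only rounds up to the minimum alignment, unlike SLAB/SLUB
 * which round up to the next kmalloc cache size.
 */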

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t __ksize(const void *block)
{
        struct folio *folio;
        unsigned int align;
        unsigned int *m;

        BUG_ON(!block);
        if (unlikely(block == ZERO_SIZE_PTR))
                return 0;

        folio = virt_to_folio(block);
        if (unlikely(!folio_test_slab(folio)))
                return folio_size(folio);

        align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
                      arch_slab_minalign());
        m = (unsigned int *)(block - align);
        return SLOB_UNITS(*m) * SLOB_UNIT;
}
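
/*
 * Example (illustrative only, 2-byte SLOB_UNITs, minalign == 8): a block
 * obtained with kmalloc(101, ...) has *m == 101, so __ksize() reports
 * SLOB_UNITS(101) * SLOB_UNIT == 102 usable bytes.
 */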

int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
        if (flags & SLAB_TYPESAFE_BY_RCU) {
                /* leave room for rcu footer at the end of object */
                c->size += sizeof(struct slob_rcu);
        }

        /* Actual size allocated */
        c->size = SLOB_UNITS(c->size) * SLOB_UNIT;
        c->flags = flags;
        return 0;
}
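
/*
 * Example (illustrative only, 2-byte SLOB_UNITs, 64-bit): for a cache with
 * object size 61 and SLAB_TYPESAFE_BY_RCU, c->size first grows by
 * sizeof(struct slob_rcu) (typically 24 bytes: a 16-byte rcu_head plus an
 * int, padded) to 85, and is then rounded up to 86, a whole number of units.
 */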

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
        void *b;

        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (c->size < PAGE_SIZE) {
                b = slob_alloc(c->size, flags, c->align, node, 0);
                trace_kmem_cache_alloc(_RET_IP_, b, c, flags, node);
        } else {
                b = slob_new_pages(flags, get_order(c->size), node);
                trace_kmem_cache_alloc(_RET_IP_, b, c, flags, node);
        }

        if (b && c->ctor) {
                WARN_ON_ONCE(flags & __GFP_ZERO);
                c->ctor(b);
        }

        kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
        return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
        return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags)
{
        return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc_lru);

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
        return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
        return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
        if (size < PAGE_SIZE)
                slob_free(b, size);
        else
                slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
        struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
        void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

        __kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
        kmemleak_free_recursive(b, c->flags);
        trace_kmem_cache_free(_RET_IP_, b, c);
        if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
                struct slob_rcu *slob_rcu;
                slob_rcu = b + (c->size - sizeof(struct slob_rcu));
                slob_rcu->size = c->size;
                call_rcu(&slob_rcu->head, kmem_rcu_free);
        } else {
                __kmem_cache_free(b, c->size);
        }
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                if (s)
                        kmem_cache_free(s, p[i]);
                else
                        kfree(p[i]);
        }
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                          void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                void *x = p[i] = kmem_cache_alloc(s, flags);

                if (!x) {
                        kmem_cache_free_bulk(s, i, p);
                        return 0;
                }
        }
        return i;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
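
/*
 * Usage sketch for the bulk API above (illustrative only; 'my_cache' and
 * 'objs' are hypothetical):
 *
 *      void *objs[16];
 *
 *      if (kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, 16, objs)) {
 *              ...
 *              kmem_cache_free_bulk(my_cache, 16, objs);
 *      }
 *
 * On failure kmem_cache_alloc_bulk() frees whatever it already allocated
 * and returns 0, so the caller never has to clean up a partial batch.
 */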

int __kmem_cache_shutdown(struct kmem_cache *c)
{
        /* No way to check for remaining objects */
        return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
        return 0;
}

static struct kmem_cache kmem_cache_boot = {
        .name = "kmem_cache",
        .size = sizeof(struct kmem_cache),
        .flags = SLAB_PANIC,
        .align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
        kmem_cache = &kmem_cache_boot;
        slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
        slab_state = FULL;
}