// SPDX-License-Identifier: GPL-2.0
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes; however, most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */
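/*
 * A rough example of the resulting behaviour, assuming 4K pages and an
 * 8-byte kmalloc alignment: kmalloc(200) reserves 8 bytes for the size
 * header and carves 208 bytes out of a page on the "small" (< 256 byte)
 * list, while kmalloc(5000) bypasses SLOB entirely and gets an order-1
 * compound page straight from alloc_pages().
 */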

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
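/*
 * For example, with 4K pages slobidx_t is an s16 and SLOB_UNIT is 2 bytes.
 * A 10-unit free block whose next free block lives at unit offset 100
 * within the page is encoded as s[0].units = 10, s[1].units = 100; a
 * 1-unit free block in the same position would instead be encoded as
 * s[0].units = -100.
 */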

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->lru, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->lru);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
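/*
 * SLOB_UNITS() rounds a byte count up to whole slob_t units. With 2-byte
 * units, for instance, SLOB_UNITS(100) == 50, SLOB_UNITS(3) == 2 and
 * SLOB_UNITS(PAGE_SIZE) == 2048 on a 4K-page configuration.
 */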

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
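/*
 * The free list within a page is terminated by having the last block's
 * "next" offset point one past the end of the page (see the initial
 * set_slob() call in slob_alloc()). That address is page aligned, so its
 * offset bits under ~PAGE_MASK are zero, which is what slob_last() tests.
 */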

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
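/*
 * Example of the head fragmentation above, assuming 2-byte units and
 * align = 8: if cur starts at byte offset 4 within the page, aligned is
 * offset 8 and delta is 2 units, so the two units at [4, 8) are left on
 * the free list as a small block and the object is carved out from
 * offset 8 onwards.
 */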

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely(gfp & __GFP_ZERO))
		memset(b, 0, size);
	return b;
}
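/*
 * Note that __GFP_ZERO is masked off before slob_new_pages(): the fresh
 * page is immediately written with freelist metadata by set_slob(), so
 * the caller-visible zeroing is instead done on just the returned object
 * by the memset() above.
 */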

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
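/*
 * A small worked example of the reinsertion above, in unit offsets:
 * suppose the page's freelist holds free blocks at [10, 20) and [40, 50).
 * Freeing [20, 30) walks to prev = 10, finds no forward neighbour
 * (30 != 40), but prev + slob_units(prev) == 20, so the block is merged
 * backwards into a single [10, 30) free block. Freeing [30, 40) instead
 * merges forwards with [40, 50) to give [30, 50).
 */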

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	fs_reclaim_acquire(gfp);
	fs_reclaim_release(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
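/*
 * Layout of a small kmalloc allocation, assuming an 8-byte minimum
 * alignment: slob_alloc(size + 8) returns m, the first 4 bytes at m hold
 * the requested size, and the caller sees m + 8. kfree() and ksize()
 * later step back by the same alignment to recover the stored size.
 */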

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
					int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);

int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}
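/*
 * For SLAB_TYPESAFE_BY_RCU caches the per-object size is grown by
 * sizeof(struct slob_rcu), so every object carries spare room at its
 * tail where kmem_cache_free() can park an rcu_head before deferring
 * the real free to kmem_rcu_free().
 */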

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor) {
		WARN_ON_ONCE(flags & __GFP_ZERO);
		c->ctor(b);
	}

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}
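/*
 * The pointer arithmetic above simply inverts the placement done in
 * kmem_cache_free() below: the slob_rcu footer sits in the last
 * sizeof(struct slob_rcu) bytes of the object, so stepping back by
 * (size - sizeof(struct slob_rcu)) recovers the object's start address.
 */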

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
								void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}