/*
** Two Level Segregated Fit memory allocator, version 3.1.
** Written by Matthew Conte
**   http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
**   http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
**
** Copyright (c) 2006-2016, Matthew Conte
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
**     * Redistributions of source code must retain the above copyright
**       notice, this list of conditions and the following disclaimer.
**     * Redistributions in binary form must reproduce the above copyright
**       notice, this list of conditions and the following disclaimer in the
**       documentation and/or other materials provided with the distribution.
**     * Neither the name of the copyright holder nor the
**       names of its contributors may be used to endorse or promote products
**       derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "multi_heap_config.h"
#include "multi_heap.h"
#include "multi_heap_internal.h"
#include "heap_tlsf_config.h"
#include "heap_tlsf.h"

/*
** Architecture-specific bit manipulation routines.
**
** TLSF achieves O(1) cost for malloc and free operations by limiting
** the search for a free block to a free list of guaranteed size
** adequate to fulfill the request, combined with efficient free list
** queries using bitmasks and architecture-specific bit-manipulation
** routines.
**
** Most modern processors provide instructions to count leading zeroes
** in a word, find the lowest and highest set bit, etc. These
** specific implementations will be used when available, falling back
** to a reasonably efficient generic implementation.
**
** NOTE: the TLSF spec relies on ffs/fls returning a value in 0..31.
** The standard ffs/fls return 1..32 by default, reserving 0 for the
** error case.
*/
static inline __attribute__((__always_inline__)) int tlsf_ffs(unsigned int word)
{
    const unsigned int reverse = word & (~word + 1);
    const int bit = 32 - __builtin_clz(reverse);
    return bit - 1;
}

static inline __attribute__((__always_inline__)) int tlsf_fls(unsigned int word)
{
    const int bit = word ? 32 - __builtin_clz(word) : 0;
    return bit - 1;
}

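/*
** Illustrative examples (not part of the allocator) of the zero-based
** convention above:
**   tlsf_ffs(0x60) == 5    (lowest set bit of 0b01100000)
**   tlsf_fls(0x60) == 6    (highest set bit)
**   tlsf_fls(0)    == -1   (no bit set)
** Note that tlsf_ffs(0) is left undefined, since __builtin_clz(0) is
** undefined behavior; callers only query non-zero bitmaps.
*/
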
/*
** Set assert macro, if it has not been provided by the user.
*/
#if !defined (tlsf_assert)
#define tlsf_assert assert
#endif

/*
** Static assertion mechanism.
*/
#define _tlsf_glue2(x, y) x ## y
#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
#define tlsf_static_assert(exp) \
    typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]

/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);

/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);

/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);

static inline __attribute__((__always_inline__)) size_t align_up(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return (x + (align - 1)) & ~(align - 1);
}

static inline __attribute__((__always_inline__)) size_t align_down(size_t x, size_t align)
{
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return x - (x & (align - 1));
}

static inline __attribute__((__always_inline__)) void* align_ptr(const void* ptr, size_t align)
{
    const tlsfptr_t aligned =
        (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
    tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
    return tlsf_cast(void*, aligned);
}

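/*
** Illustrative examples (not part of the allocator): for power-of-two
** alignments,
**   align_up(13, 8)   == 16
**   align_down(13, 8) == 8
**   align_up(16, 8)   == 16   (already-aligned values are unchanged)
*/
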
/*
** Adjust an allocation size to be aligned to word size, and no smaller
** than the internal minimum.
*/
static inline __attribute__((__always_inline__)) size_t adjust_request_size(size_t size, size_t align)
{
    size_t adjust = 0;
    if (size)
    {
        const size_t aligned = align_up(size, align);

        /* The aligned size must not exceed block_size_max, or we'll go out of bounds on sl_bitmap */
        if (aligned < block_size_max)
        {
            adjust = tlsf_max(aligned, block_size_min);
        }
    }
    return adjust;
}

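/*
** Illustrative example (not part of the allocator), assuming a 32-bit
** configuration where ALIGN_SIZE == 4: adjust_request_size(10, ALIGN_SIZE)
** computes align_up(10, 4) == 12, then tlsf_max(12, block_size_min), so
** tiny requests are padded up to the internal minimum block size. A size
** of 0 (or one too large for the pool) yields 0, which callers treat as
** an allocation failure.
*/
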
/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/

static inline __attribute__((__always_inline__)) void mapping_insert(size_t size, int* fli, int* sli)
{
    int fl, sl;
    if (size < SMALL_BLOCK_SIZE)
    {
        /* Store small blocks in first list. */
        fl = 0;
        sl = tlsf_cast(int, size) >> 2;
    }
    else
    {
        fl = tlsf_fls(size);
        sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
        fl -= (FL_INDEX_SHIFT - 1);
    }
    *fli = fl;
    *sli = sl;
}

/* This version rounds up to the next block size (for allocations) */
static inline __attribute__((__always_inline__)) void mapping_search(size_t size, int* fli, int* sli)
{
    if (size >= SMALL_BLOCK_SIZE)
    {
        const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
        size += round;
    }
    mapping_insert(size, fli, sli);
}

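/*
** Worked example (illustrative only), assuming the typical 32-bit
** configuration: ALIGN_SIZE_LOG2 == 2 and SL_INDEX_COUNT_LOG2 == 5,
** so FL_INDEX_SHIFT == 7 and SMALL_BLOCK_SIZE == 128.
**
** mapping_insert(460):
**   fl = tlsf_fls(460) = 8                    (256 <= 460 < 512)
**   sl = (460 >> (8 - 5)) ^ (1 << 5) = 57 ^ 32 = 25
**   fl -= FL_INDEX_SHIFT - 1            ->  fl = 2
** i.e. a free 460-byte block lives in blocks[2][25], the list that
** holds sizes [456, 464).
**
** mapping_search(460) first rounds the size up:
**   round = (1 << (8 - 5)) - 1 = 7, so 460 becomes 467,
** which maps to blocks[2][26], covering [464, 472). Every block in
** that list (or any higher one) is guaranteed to fit 460 bytes; this
** guarantee is what makes the O(1) search safe.
*/
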
static inline __attribute__((__always_inline__)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
{
    int fl = *fli;
    int sl = *sli;

    /*
    ** First, search for a block in the list associated with the given
    ** fl/sl index.
    */
    unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
    if (!sl_map)
    {
        /* No block exists. Search in the next largest first-level list. */
        const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
        if (!fl_map)
        {
            /* No free blocks available, memory has been exhausted. */
            return 0;
        }

        fl = tlsf_ffs(fl_map);
        *fli = fl;
        sl_map = control->sl_bitmap[fl];
    }
    tlsf_assert(sl_map && "internal error - second level bitmap is null");
    sl = tlsf_ffs(sl_map);
    *sli = sl;

    /* Return the first block in the free list. */
    return control->blocks[fl][sl];
}

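/*
** Illustrative example (not part of the allocator): suppose the caller
** asks for fl = 2, sl = 26 but sl_bitmap[2] has no bit set at or above
** bit 26. The (~0U << sl) mask then zeroes sl_map, so the search falls
** back to fl_bitmap & (~0U << 3) to find the next non-empty first-level
** list, and takes its lowest non-empty second-level list via tlsf_ffs().
** Any block found this way is at least as large as the request.
*/
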
/* Remove a free block from the free list. */
static inline __attribute__((__always_inline__)) void remove_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
    block_header_t* prev = block->prev_free;
    block_header_t* next = block->next_free;
    tlsf_assert(prev && "prev_free field can not be null");
    tlsf_assert(next && "next_free field can not be null");
    next->prev_free = prev;
    prev->next_free = next;

    /* If this block is the head of the free list, set new head. */
    if (control->blocks[fl][sl] == block)
    {
        control->blocks[fl][sl] = next;

        /* If the new head is null, clear the bitmap. */
        if (next == &control->block_null)
        {
            control->sl_bitmap[fl] &= ~(1 << sl);

            /* If the second bitmap is now empty, clear the fl bitmap. */
            if (!control->sl_bitmap[fl])
            {
                control->fl_bitmap &= ~(1 << fl);
            }
        }
    }
}

/* Insert a free block into the free block list. */
static inline __attribute__((__always_inline__)) void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
    block_header_t* current = control->blocks[fl][sl];
    tlsf_assert(current && "free list cannot have a null entry");
    tlsf_assert(block && "cannot insert a null entry into the free list");
    block->next_free = current;
    block->prev_free = &control->block_null;
    current->prev_free = block;

    tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
        && "block not aligned properly");
    /*
    ** Insert the new block at the head of the list, and mark the first-
    ** and second-level bitmaps appropriately.
    */
    control->blocks[fl][sl] = block;
    control->fl_bitmap |= (1 << fl);
    control->sl_bitmap[fl] |= (1 << sl);
}

/* Remove a given block from the free list. */
static inline __attribute__((__always_inline__)) void block_remove(control_t* control, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(control, block, fl, sl);
}

/* Insert a given block into the free list. */
static inline __attribute__((__always_inline__)) void block_insert(control_t* control, block_header_t* block)
{
    int fl, sl;
    mapping_insert(block_size(block), &fl, &sl);
    insert_free_block(control, block, fl, sl);
}

static inline __attribute__((__always_inline__)) int block_can_split(block_header_t* block, size_t size)
{
    return block_size(block) >= sizeof(block_header_t) + size;
}

/* Split a block into two, the second of which is free. */
static inline __attribute__((__always_inline__)) block_header_t* block_split(block_header_t* block, size_t size)
{
    /* Calculate the amount of space left in the remaining block.
     * REMINDER: the remaining pointer's first field is `prev_phys_block`, but that
     * field is part of the previous physical block. */
    block_header_t* remaining =
        offset_to_block(block_to_ptr(block), size - block_header_overhead);

    /* `size` passed as an argument is the first block's new size, thus, the remaining block's size
     * is `block_size(block) - size`. However, the block's data must be preceded by its size field.
     * This field is NOT part of the size, so it has to be subtracted from the calculation. */
    const size_t remain_size = block_size(block) - (size + block_header_overhead);

    tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
        && "remaining block not aligned properly");

    tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
    block_set_size(remaining, remain_size);
    tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

    block_set_size(block, size);
    block_mark_as_free(remaining);

    /**
     * Here is the final outcome of this function:
     *
     * block                             remaining (block_ptr + size - BHO)
     * +                                 +
     * |                                 |
     * v                                 v
     * +----------------------------------------------------------------------+
     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
     * |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
     * +----------------------------------------------------------------------+
     *      |    |                           |    |
     *      +    +<------------------------->+    +<------------------------->
     *      BHO    `size` (argument) bytes   BHO     `remain_size` bytes
     *
     * Where BHO = block_header_overhead,
     * 0: part of the memory owned by `block`'s previous neighbour,
     * x: part of the memory owned by `block`,
     * #: part of the memory owned by `remaining`.
     */

    return remaining;
}

/* Absorb a free block's storage into an adjacent previous free block. */
static inline __attribute__((__always_inline__)) block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
{
    tlsf_assert(!block_is_last(prev) && "previous block can't be last");
    /* Note: Leaves flags untouched. */
    prev->size += block_size(block) + block_header_overhead;
    block_link_next(prev);

#ifdef MULTI_HEAP_POISONING_SLOW
    /* The absorbed block's header needs to be replaced with a fill pattern */
    multi_heap_internal_poison_fill_region(block, sizeof(block_header_t), true /* free */);
#endif

    return prev;
}

/* Merge a just-freed block with an adjacent previous free block. */
static inline __attribute__((__always_inline__)) block_header_t* block_merge_prev(control_t* control, block_header_t* block)
{
    if (block_is_prev_free(block))
    {
        block_header_t* prev = block_prev(block);
        tlsf_assert(prev && "prev physical block can't be null");
        tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
        block_remove(control, prev);
        block = block_absorb(prev, block);
    }

    return block;
}

/* Merge a just-freed block with an adjacent free block. */
static inline __attribute__((__always_inline__)) block_header_t* block_merge_next(control_t* control, block_header_t* block)
{
    block_header_t* next = block_next(block);
    tlsf_assert(next && "next physical block can't be null");

    if (block_is_free(next))
    {
        tlsf_assert(!block_is_last(block) && "block can't be last");
        block_remove(control, next);
        block = block_absorb(block, next);
    }

    return block;
}

/* Trim any trailing block space off the end of a block, return to pool. */
static inline __attribute__((__always_inline__)) void block_trim_free(control_t* control, block_header_t* block, size_t size)
{
    tlsf_assert(block_is_free(block) && "block must be free");
    if (block_can_split(block, size))
    {
        block_header_t* remaining_block = block_split(block, size);
        block_link_next(block);
        block_set_prev_free(remaining_block);
        block_insert(control, remaining_block);
    }
}

/* Trim any trailing block space off the end of a used block, return to pool. */
static inline __attribute__((__always_inline__)) void block_trim_used(control_t* control, block_header_t* block, size_t size)
{
    tlsf_assert(!block_is_free(block) && "block must be used");
    if (block_can_split(block, size))
    {
        /* If the next block is free, we must coalesce. */
        block_header_t* remaining_block = block_split(block, size);
        block_set_prev_used(remaining_block);

        remaining_block = block_merge_next(control, remaining_block);
        block_insert(control, remaining_block);
    }
}

static inline __attribute__((__always_inline__)) block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
{
    block_header_t* remaining_block = block;
    if (block_can_split(block, size))
    {
        /* We want to split `block` in two: the first block will be freed and the
         * second block will be returned. */
        remaining_block = block_split(block, size - block_header_overhead);

        /* `remaining_block` is the second block, mark its predecessor (first
         * block) as free. */
        block_set_prev_free(remaining_block);

        block_link_next(block);

        /* Put back the first block into the free memory list. */
        block_insert(control, block);
    }

    return remaining_block;
}

static inline __attribute__((__always_inline__)) block_header_t* block_locate_free(control_t* control, size_t size)
{
    int fl = 0, sl = 0;
    block_header_t* block = 0;

    if (size)
    {
        mapping_search(size, &fl, &sl);

        /*
        ** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
        ** with indices that are off the end of the block array.
        ** So, we protect against that here, since this is the only callsite of mapping_search.
        ** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
        */
        if (fl < FL_INDEX_COUNT)
        {
            block = search_suitable_block(control, &fl, &sl);
        }
    }

    if (block)
    {
        tlsf_assert(block_size(block) >= size);
        remove_free_block(control, block, fl, sl);
    }

    return block;
}

static inline __attribute__((__always_inline__)) void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
{
    void* p = 0;
    if (block)
    {
        tlsf_assert(size && "size must be non-zero");
        block_trim_free(control, block, size);
        block_mark_as_used(block);
        p = block_to_ptr(block);
    }
    return p;
}

/* Clear structure and point all empty lists at the null block. */
static void control_construct(control_t* control)
{
    int i, j;

    control->block_null.next_free = &control->block_null;
    control->block_null.prev_free = &control->block_null;

    control->fl_bitmap = 0;
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        control->sl_bitmap[i] = 0;
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            control->blocks[i][j] = &control->block_null;
        }
    }
}

/*
** Debugging utilities.
*/

typedef struct integrity_t
{
    int prev_status;
    int status;
} integrity_t;

#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }

static void integrity_walker(void* ptr, size_t size, int used, void* user)
{
    block_header_t* block = block_from_ptr(ptr);
    integrity_t* integ = tlsf_cast(integrity_t*, user);
    const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
    const int this_status = block_is_free(block) ? 1 : 0;
    const size_t this_block_size = block_size(block);

    int status = 0;
    (void)used;
    tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
    tlsf_insist(size == this_block_size && "block size incorrect");

    integ->prev_status = this_status;
    integ->status += status;
}

int tlsf_check(tlsf_t tlsf)
{
    int i, j;

    control_t* control = tlsf_cast(control_t*, tlsf);
    int status = 0;

    /* Check that the free lists and bitmaps are accurate. */
    for (i = 0; i < FL_INDEX_COUNT; ++i)
    {
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
            const int fl_map = control->fl_bitmap & (1 << i);
            const int sl_list = control->sl_bitmap[i];
            const int sl_map = sl_list & (1 << j);
            const block_header_t* block = control->blocks[i][j];

            /* Check that first- and second-level lists agree. */
            if (!fl_map)
            {
                tlsf_insist(!sl_map && "second-level map must be null");
            }

            if (!sl_map)
            {
                tlsf_insist(block == &control->block_null && "block list must be null");
                continue;
            }

            /* Check that there is at least one free block. */
            tlsf_insist(sl_list && "no free blocks in second-level map");
            tlsf_insist(block != &control->block_null && "block should not be null");

            while (block != &control->block_null)
            {
                int fli, sli;
                tlsf_insist(block_is_free(block) && "block should be free");
                tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
                tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
                tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
                tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

                mapping_insert(block_size(block), &fli, &sli);
                tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
                block = block->next_free;
            }
        }
    }

    return status;
}

#undef tlsf_insist

static void default_walker(void* ptr, size_t size, int used, void* user)
{
    (void)user;
    printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
}

void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
{
    tlsf_walker pool_walker = walker ? walker : default_walker;
    block_header_t* block =
        offset_to_block(pool, -(int)block_header_overhead);

    while (block && !block_is_last(block))
    {
        pool_walker(
            block_to_ptr(block),
            block_size(block),
            !block_is_free(block),
            user);
        block = block_next(block);
    }
}

size_t tlsf_block_size(void* ptr)
{
    size_t size = 0;
    if (ptr)
    {
        const block_header_t* block = block_from_ptr(ptr);
        size = block_size(block);
    }
    return size;
}

int tlsf_check_pool(pool_t pool)
{
    /* Check that the blocks are physically correct. */
    integrity_t integ = { 0, 0 };
    tlsf_walk_pool(pool, integrity_walker, &integ);

    return integ.status;
}

/*
** Size of the TLSF structures in a given memory block passed to
** tlsf_create, equal to the size of a control_t
*/
size_t tlsf_size(void)
{
    return sizeof(control_t);
}

size_t tlsf_align_size(void)
{
    return ALIGN_SIZE;
}

size_t tlsf_block_size_min(void)
{
    return block_size_min;
}

size_t tlsf_block_size_max(void)
{
    return block_size_max;
}

/*
** Overhead of the TLSF structures in a given memory block passed to
** tlsf_add_pool, equal to the overhead of a free block and the
** sentinel block.
*/
size_t tlsf_pool_overhead(void)
{
    return 2 * block_header_overhead;
}

size_t tlsf_alloc_overhead(void)
{
    return block_header_overhead;
}

pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
{
    block_header_t* block;
    block_header_t* next;

    const size_t pool_overhead = tlsf_pool_overhead();
    const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);

    if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
    {
        printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",
            (unsigned int)ALIGN_SIZE);
        return 0;
    }

    if (pool_bytes < block_size_min || pool_bytes > block_size_max)
    {
#if defined (TLSF_64BIT)
        printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
            (unsigned int)(pool_overhead + block_size_min),
            (unsigned int)((pool_overhead + block_size_max) / 256));
#else
        printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
            (unsigned int)(pool_overhead + block_size_min),
            (unsigned int)(pool_overhead + block_size_max));
#endif
        return 0;
    }

    /*
    ** Create the main free block. Offset the start of the block slightly
    ** so that the prev_phys_block field falls outside of the pool -
    ** it will never be used.
    */
    block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
    block_set_size(block, pool_bytes);
    block_set_free(block);
    block_set_prev_used(block);
    block_insert(tlsf_cast(control_t*, tlsf), block);

    /* Split the block to create a zero-size sentinel block. */
    next = block_link_next(block);
    block_set_size(next, 0);
    block_set_used(next);
    block_set_prev_free(next);

    return mem;
}

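/*
** Illustrative sketch (not part of the allocator) of the resulting pool
** layout:
**
**   +--------------------------+-------------------------+----------------+
**   | main free block header   | pool_bytes of free      | sentinel block |
**   | (prev_phys_block lies    | memory, handed out by   | (used, size 0) |
**   | outside the pool)        | tlsf_malloc             |                |
**   +--------------------------+-------------------------+----------------+
**
** The zero-size, used sentinel marks the physical end of the pool:
** block_next() stops there, and block_merge_next() can never coalesce
** past it.
*/
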
void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);

    int fl = 0, sl = 0;

    tlsf_assert(block_is_free(block) && "block should be free");
    tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
    tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");

    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(control, block, fl, sl);
}

/*
** TLSF main interface.
*/

tlsf_t tlsf_create(void* mem)
{
#if _DEBUG
    if (test_ffs_fls())
    {
        return 0;
    }
#endif

    if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
    {
        printf("tlsf_create: Memory must be aligned to %u bytes.\n",
            (unsigned int)ALIGN_SIZE);
        return 0;
    }

    control_construct(tlsf_cast(control_t*, mem));

    return tlsf_cast(tlsf_t, mem);
}

pool_t tlsf_get_pool(tlsf_t tlsf)
{
    return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
}

tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
{
    tlsf_t tlsf = tlsf_create(mem);
    tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
    return tlsf;
}

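/*
** Minimal usage sketch (illustrative only; the buffer size and names are
** hypothetical). The control structure occupies the first tlsf_size()
** bytes of the buffer and the remainder becomes the first pool:
**
**   static char heap_buf[64 * 1024] __attribute__((aligned(4)));
**
**   tlsf_t t = tlsf_create_with_pool(heap_buf, sizeof(heap_buf));
**   void* p = tlsf_malloc(t, 100);
**   ...
**   tlsf_free(t, p);
**
** Further disjoint memory regions can be handed to the same instance
** later via tlsf_add_pool().
*/
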
void* tlsf_malloc(tlsf_t tlsf, size_t size)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    size_t adjust = adjust_request_size(size, ALIGN_SIZE);
    block_header_t* block = block_locate_free(control, adjust);
    return block_prepare_used(control, block, adjust);
}

/**
 * @brief Allocate memory of at least `size` bytes, where the byte at `data_offset` will be aligned to `alignment`.
 *
 * This function allocates a block of memory and returns a pointer `ptr` to it. The byte at
 * offset `data_offset` within that block (i.e., the byte at `ptr + data_offset`) will be
 * aligned to `alignment`. This is useful for allocating memory that will internally contain
 * a header, where the usable memory following the header (i.e. `ptr + data_offset`) must be
 * aligned.
 *
 * For example, a call to `multi_heap_aligned_alloc_impl_offs(heap, 64, 256, 20)` will return a
 * pointer `ptr` to free memory of minimum 64 bytes, where `ptr + 20` is aligned on `256`.
 * So `(ptr + 20) % 256` equals 0.
 *
 * @param tlsf TLSF structure to allocate memory from.
 * @param align Alignment required for the byte at `data_offset`.
 * @param size Minimum size, in bytes, of the memory to allocate, INCLUDING
 *             the `data_offset` bytes.
 * @param data_offset Offset of the byte to be aligned on `alignment`. This can be 0,
 *                    in which case the returned pointer itself will be aligned on
 *                    `alignment`. If it is not a multiple of the CPU word size,
 *                    it will be aligned up to the closest multiple of it.
 *
 * @return pointer to free memory.
 */
void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
    const size_t off_adjust = align_up(data_offset, ALIGN_SIZE);

    /*
    ** We must allocate an additional minimum block size bytes so that if
    ** our free block will leave an alignment gap which is smaller, we can
    ** trim a leading free block and release it back to the pool. We must
    ** do this because the previous physical block is in use, therefore
    ** the prev_phys_block field is not valid, and we can't simply adjust
    ** the size of that block.
    */
    const size_t gap_minimum = sizeof(block_header_t) + off_adjust;
    /* The offset is included in both `adjust` and `gap_minimum`, so we
    ** need to subtract it once.
    */
    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum - off_adjust, align);

    /*
    ** If alignment is less than or equal to the base alignment, we're done.
    ** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
    */
    const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;

    block_header_t* block = block_locate_free(control, aligned_size);

    /* This can't be a static assert. */
    tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

    if (block)
    {
        void* ptr = block_to_ptr(block);
        void* aligned = align_ptr(ptr, align);
        size_t gap = tlsf_cast(size_t,
            tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

        /*
        ** If the gap is too small, or if there is no gap but we need one,
        ** offset to the next aligned boundary.
        */
        if ((gap && gap < gap_minimum) || (!gap && off_adjust))
        {
            const size_t gap_remain = gap_minimum - gap;
            const size_t offset = tlsf_max(gap_remain, align);
            const void* next_aligned = tlsf_cast(void*,
                tlsf_cast(tlsfptr_t, aligned) + offset);

            aligned = align_ptr(next_aligned, align);
            gap = tlsf_cast(size_t,
                tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
        }

        if (gap)
        {
            tlsf_assert(gap >= gap_minimum && "gap size too small");
            block = block_trim_free_leading(control, block, gap - off_adjust);
        }
    }

    /* Preparing the block for use will also trim the trailing free memory. */
    return block_prepare_used(control, block, adjust);
}

/**
 * @brief Same as the `tlsf_memalign_offs` function but with a 0 offset.
 * The pointer returned is aligned on `align`.
 */
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
{
    return tlsf_memalign_offs(tlsf, align, size, 0);
}

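/*
** Illustrative sketch (hypothetical sizes): to keep a 16-byte header at
** the start of an allocation while aligning the payload that follows it
** to 64 bytes:
**
**   void* p = tlsf_memalign_offs(t, 64, 16 + payload_len, 16);
**   // p itself may be unaligned, but ((uintptr_t)p + 16) % 64 == 0
**
** With data_offset == 0 this degenerates to tlsf_memalign(), i.e. the
** returned pointer itself is aligned.
*/
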
void tlsf_free(tlsf_t tlsf, void* ptr)
{
    /* Don't attempt to free a NULL pointer. */
    if (ptr)
    {
        control_t* control = tlsf_cast(control_t*, tlsf);
        block_header_t* block = block_from_ptr(ptr);
        tlsf_assert(!block_is_free(block) && "block already marked as free");
        block_mark_as_free(block);
        block = block_merge_prev(control, block);
        block = block_merge_next(control, block);
        block_insert(control, block);
    }
}

/*
** The TLSF block header gives us enough information to implement a
** reasonably intelligent realloc, growing or shrinking the currently
** allocated block as required.
**
** This routine handles the somewhat esoteric edge cases of realloc:
** - a non-zero size with a null pointer will behave like malloc
** - a zero size with a non-null pointer will behave like free
** - a request that cannot be satisfied will leave the original buffer
**   untouched
** - an extended buffer size will leave the newly-allocated area with
**   contents undefined
*/
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
{
    control_t* control = tlsf_cast(control_t*, tlsf);
    void* p = 0;

    /* Zero-size requests are treated as free. */
    if (ptr && size == 0)
    {
        tlsf_free(tlsf, ptr);
    }
    /* Requests with NULL pointers are treated as malloc. */
    else if (!ptr)
    {
        p = tlsf_malloc(tlsf, size);
    }
    else
    {
        block_header_t* block = block_from_ptr(ptr);
        block_header_t* next = block_next(block);

        const size_t cursize = block_size(block);
        const size_t combined = cursize + block_size(next) + block_header_overhead;
        const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

        tlsf_assert(!block_is_free(block) && "block already marked as free");

        /*
        ** If the next block is used, or if it does not offer enough space
        ** when combined with the current block, we must reallocate and copy.
        */
        if (adjust > cursize && (!block_is_free(next) || adjust > combined))
        {
            p = tlsf_malloc(tlsf, size);
            if (p)
            {
                const size_t minsize = tlsf_min(cursize, size);
                memcpy(p, ptr, minsize);
                tlsf_free(tlsf, ptr);
            }
        }
        else
        {
            /* Do we need to expand into the next block? */
            if (adjust > cursize)
            {
                block_merge_next(control, block);
                block_mark_as_used(block);
            }

            /* Trim the resulting block and return the original pointer. */
            block_trim_used(control, block, adjust);
            p = ptr;
        }
    }

    return p;
}

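/*
** Illustrative sketch (hypothetical sizes) of the in-place growth path
** above: if block_size(block) == 64, the next physical block is free
** with block_size(next) == 128, and the adjusted request is 160 bytes,
** then combined == 64 + 128 + block_header_overhead >= 160, so the two
** blocks are merged, trimmed back down to 160 bytes, and the original
** pointer is returned without any memcpy.
*/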