/*
** Two Level Segregated Fit memory allocator, version 3.1.
** Written by Matthew Conte
**	http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
**	http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
**
** Copyright (c) 2006-2016, Matthew Conte
** All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are met:
**     * Redistributions of source code must retain the above copyright
**       notice, this list of conditions and the following disclaimer.
**     * Redistributions in binary form must reproduce the above copyright
**       notice, this list of conditions and the following disclaimer in the
**       documentation and/or other materials provided with the distribution.
**     * Neither the name of the copyright holder nor the
**       names of its contributors may be used to endorse or promote products
**       derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#pragma once

/*
** Data structures and associated constants.
*/

/*
** Since block sizes are always a multiple of at least 4, the two least
** significant bits of the size field can be used to store the block status:
** - bit 0: whether the block is busy or free
** - bit 1: whether the previous block is busy or free
*/
#define block_header_free_bit  (1 << 0)
#define block_header_prev_free_bit  (1 << 1)

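/*
** A worked example (illustrative, not part of the original source): a raw
** size field holding 0x43 describes a 0x40-byte block that is itself free
** and whose previous physical block is also free:
**
**   size      = 0x43 & ~0x3;  // 0x40
**   free      = 0x43 & 0x1;   // 1 (block_header_free_bit)
**   prev_free = 0x43 & 0x2;   // 2 (block_header_prev_free_bit)
*/
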
/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
#define block_header_overhead  (sizeof(size_t))

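/*
** Sketch of the resulting physical layout (inferred from the comments
** above): the prev_phys_block field of a block overlaps the last word of
** the block before it, so a used block pays only sizeof(size_t) of
** overhead for its size field.
**
**   ... data of block N-1 ... | prev_phys_block | size | data of block N ...
**                               ^ header of block N begins here, with its
**                                 first field inside block N-1
*/
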
/* User data starts directly after the size field in a used block. */
#define block_start_offset (offsetof(block_header_t, size) + sizeof(size_t))

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the largest size
** addressable by FL_INDEX.
*/
#define block_size_min  (sizeof(block_header_t) - sizeof(block_header_t*))
#define block_size_max  (tlsf_cast(size_t, 1) << FL_INDEX_MAX)

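/*
** For example (assuming the reference layout, where block_header_t holds
** one pointer, one size_t, and two free-list pointers, and FL_INDEX_MAX
** is 30 on a 32-bit target): block_size_min is 16 - 4 = 12 bytes and
** block_size_max is 1 << 30 bytes (1 GiB).
*/
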
/*
** block_header_t member functions.
*/
/* Return the block size with the two status bits masked off. */
static inline __attribute__((__always_inline__)) size_t block_size(const block_header_t* block)
{
	return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
}

/* Set the block size, preserving the two status bits. */
static inline __attribute__((__always_inline__)) void block_set_size(block_header_t* block, size_t size)
{
	const size_t oldsize = block->size;
	block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

/* A size of zero marks the sentinel block at the end of the pool. */
static inline __attribute__((__always_inline__)) int block_is_last(const block_header_t* block)
{
	return block_size(block) == 0;
}

static inline __attribute__((__always_inline__)) int block_is_free(const block_header_t* block)
{
	return tlsf_cast(int, block->size & block_header_free_bit);
}

static inline __attribute__((__always_inline__)) void block_set_free(block_header_t* block)
{
	block->size |= block_header_free_bit;
}

static inline __attribute__((__always_inline__)) void block_set_used(block_header_t* block)
{
	block->size &= ~block_header_free_bit;
}

static inline __attribute__((__always_inline__)) int block_is_prev_free(const block_header_t* block)
{
	return tlsf_cast(int, block->size & block_header_prev_free_bit);
}

static inline __attribute__((__always_inline__)) void block_set_prev_free(block_header_t* block)
{
	block->size |= block_header_prev_free_bit;
}

static inline __attribute__((__always_inline__)) void block_set_prev_used(block_header_t* block)
{
	block->size &= ~block_header_prev_free_bit;
}

/* Recover the block header from a pointer to its user data. */
static inline __attribute__((__always_inline__)) block_header_t* block_from_ptr(const void* ptr)
{
	return tlsf_cast(block_header_t*,
		tlsf_cast(unsigned char*, ptr) - block_start_offset);
}

/* Return a pointer to the user data of a block. */
static inline __attribute__((__always_inline__)) void* block_to_ptr(const block_header_t* block)
{
	return tlsf_cast(void*,
		tlsf_cast(unsigned char*, block) + block_start_offset);
}

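/*
** Illustrative sketch (a hypothetical helper, not part of the original
** source): converting a user pointer to its header and back must yield
** the original pointer.
*/
static inline __attribute__((__always_inline__)) int block_ptr_roundtrip_ok(void* ptr)
{
	return block_to_ptr(block_from_ptr(ptr)) == ptr;
}
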
/* Return location of next block after block of given size. */
static inline __attribute__((__always_inline__)) block_header_t* offset_to_block(const void* ptr, size_t size)
{
	return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
}

/* Return location of previous block. */
static inline __attribute__((__always_inline__)) block_header_t* block_prev(const block_header_t* block)
{
	return block->prev_phys_block;
}

/* Return location of next existing block. */
static inline __attribute__((__always_inline__)) block_header_t* block_next(const block_header_t* block)
{
	/*
	** The next header begins block_header_overhead bytes before the end of
	** this block, since its prev_phys_block field overlaps this block's
	** last word.
	*/
	block_header_t* next = offset_to_block(block_to_ptr(block),
		block_size(block) - block_header_overhead);
	return next;
}

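/*
** A worked example (illustrative, assuming a 32-bit target where
** block_start_offset is 8 and block_header_overhead is 4): for a block
** header at 0x1000 with a stored size of 0x40, the user data begins at
** 0x1008 and the next header begins at 0x1008 + 0x40 - 4 = 0x1044.
*/
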
/* Link a new block with its physical neighbor, return the neighbor. */
static inline __attribute__((__always_inline__)) block_header_t* block_link_next(block_header_t* block)
{
	block_header_t* next = block_next(block);
	next->prev_phys_block = block;
	return next;
}

static inline __attribute__((__always_inline__)) void block_mark_as_free(block_header_t* block)
{
	/* Link the block to the next block, first. */
	block_header_t* next = block_link_next(block);
	block_set_prev_free(next);
	block_set_free(block);
}

static inline __attribute__((__always_inline__)) void block_mark_as_used(block_header_t* block)
{
	block_header_t* next = block_next(block);
	block_set_prev_used(next);
	block_set_used(block);
}
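
/*
** Sketch of the invariant the two functions above maintain (a hypothetical
** helper, not part of the original source): a block's own free bit must
** agree with the prev-free bit of its physical successor.
*/
static inline __attribute__((__always_inline__)) int block_free_bits_consistent(const block_header_t* block)
{
	return !block_is_free(block) == !block_is_prev_free(block_next(block));
}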