/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_LIB_OS_HEAP_H_
#define ZEPHYR_INCLUDE_LIB_OS_HEAP_H_

/*
 * Internal heap APIs
 */

/* These validation checks are non-trivially expensive, so enable
 * them only when debugging the heap code. They shouldn't be routine
 * assertions.
 */
#ifdef CONFIG_SYS_HEAP_VALIDATE
#define CHECK(x) __ASSERT(x, "")
#else
#define CHECK(x) /**/
#endif

/* Chunks are identified by their offset in 8 byte units from the
 * first address in the buffer (a zero-valued chunkid_t is used as a
 * null; that chunk would always point into the metadata at the start
 * of the heap and cannot be allocated). They are prefixed by a
 * variable-size header that depends on the size of the heap. Heaps
 * with fewer than 2^15 units (256 KiB) of storage use shorts to store
 * the fields, otherwise the fields are 32 bit integers allowing a
 * 16 GiB heap space (larger spaces really aren't in scope for this
 * code, but could be handled similarly I suppose). Because of that
 * design there's a certain amount of boilerplate API needed to expose
 * the field accessors since we can't use natural syntax.
 *
 * The fields are:
 *   LEFT_SIZE: The size of the left (next lower chunk in memory)
 *              neighbor chunk.
 *   SIZE_AND_USED: The total size (including header) of the chunk in
 *                  8-byte units. The bottom bit stores a "used" flag.
 *   FREE_PREV: Chunk ID of the previous node in a free list.
 *   FREE_NEXT: Chunk ID of the next node in a free list.
 *
 * The free lists are circular lists, one for each power-of-two size
 * category. The free list pointers exist only for free chunks,
 * obviously; this memory is part of the user's buffer once the chunk
 * is allocated.
 *
 * The field order is chosen so that an allocated buffer is immediately
 * bounded by the SIZE_AND_USED of its own chunk at the bottom and the
 * LEFT_SIZE of the following chunk at the top. This ordering allows
 * quick buffer overflow detection by testing
 * left_chunk(c + chunk_size(c)) == c.
 */
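
/* For illustration (derived from the accessors below, not a separate
 * format definition): on a "small" heap the four fields are 16 bit
 * values packed into the chunk's first 8-byte unit:
 *
 *   byte 0: LEFT_SIZE    byte 2: SIZE_AND_USED
 *   byte 4: FREE_PREV    byte 6: FREE_NEXT
 *
 * On a "big" heap the same fields are 32 bit values spanning the
 * first two units. An allocated chunk needs only LEFT_SIZE and
 * SIZE_AND_USED; everything after them is the caller's buffer.
 */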

enum chunk_fields { LEFT_SIZE, SIZE_AND_USED, FREE_PREV, FREE_NEXT };

#define CHUNK_UNIT 8U

typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;

/* big_heap needs uint32_t, small_heap needs uint16_t */
typedef uint32_t chunkid_t;
typedef uint32_t chunksz_t;

struct z_heap_bucket {
	chunkid_t next;		/* head of this bucket's circular free list */
};

struct z_heap {
	chunkid_t chunk0_hdr[2];	/* overlaps the header of chunk 0 */
	chunkid_t end_chunk;		/* chunk ID just past the last real chunk */
	uint32_t avail_buckets;		/* bit mask of buckets with free chunks */
#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	size_t free_bytes;
	size_t allocated_bytes;
	size_t max_allocated_bytes;
#endif
	struct z_heap_bucket buckets[0];	/* per-bucket free list heads */
};

static inline bool big_heap_chunks(chunksz_t chunks)
{
	if (IS_ENABLED(CONFIG_SYS_HEAP_SMALL_ONLY)) {
		return false;
	}
	if (IS_ENABLED(CONFIG_SYS_HEAP_BIG_ONLY) || sizeof(void *) > 4U) {
		return true;
	}
	return chunks > 0x7fffU;
}

static inline bool big_heap_bytes(size_t bytes)
{
	return big_heap_chunks(bytes / CHUNK_UNIT);
}

static inline bool big_heap(struct z_heap *h)
{
	return big_heap_chunks(h->end_chunk);
}

static inline chunk_unit_t *chunk_buf(struct z_heap *h)
{
	/* struct z_heap coincides with the first chunk of the heap buffer */
	return (chunk_unit_t *)h;
}

static inline chunkid_t chunk_field(struct z_heap *h, chunkid_t c,
				    enum chunk_fields f)
{
	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		return ((uint32_t *)cmem)[f];
	} else {
		return ((uint16_t *)cmem)[f];
	}
}

static inline void chunk_set(struct z_heap *h, chunkid_t c,
			     enum chunk_fields f, chunkid_t val)
{
	CHECK(c <= h->end_chunk);

	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		CHECK(val == (uint32_t)val);
		((uint32_t *)cmem)[f] = val;
	} else {
		CHECK(val == (uint16_t)val);
		((uint16_t *)cmem)[f] = val;
	}
}

static inline bool chunk_used(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, SIZE_AND_USED) & 1U;
}

static inline chunksz_t chunk_size(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, SIZE_AND_USED) >> 1;
}

static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
{
	chunk_unit_t *buf = chunk_buf(h);
	void *cmem = &buf[c];

	if (big_heap(h)) {
		if (used) {
			((uint32_t *)cmem)[SIZE_AND_USED] |= 1U;
		} else {
			((uint32_t *)cmem)[SIZE_AND_USED] &= ~1U;
		}
	} else {
		if (used) {
			((uint16_t *)cmem)[SIZE_AND_USED] |= 1U;
		} else {
			((uint16_t *)cmem)[SIZE_AND_USED] &= ~1U;
		}
	}
}

/*
 * Note: no need to preserve the used bit here as the chunk is never in use
 * when its size is modified, and a potential set_chunk_used() is always
 * invoked after set_chunk_size().
 */
static inline void set_chunk_size(struct z_heap *h, chunkid_t c, chunksz_t size)
{
	chunk_set(h, c, SIZE_AND_USED, size << 1);
}

static inline chunkid_t prev_free_chunk(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, FREE_PREV);
}

static inline chunkid_t next_free_chunk(struct z_heap *h, chunkid_t c)
{
	return chunk_field(h, c, FREE_NEXT);
}

static inline void set_prev_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t prev)
{
	chunk_set(h, c, FREE_PREV, prev);
}

static inline void set_next_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t next)
{
	chunk_set(h, c, FREE_NEXT, next);
}

static inline chunkid_t left_chunk(struct z_heap *h, chunkid_t c)
{
	return c - chunk_field(h, c, LEFT_SIZE);
}

static inline chunkid_t right_chunk(struct z_heap *h, chunkid_t c)
{
	return c + chunk_size(h, c);
}
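
/* Illustrative sketch only (a hypothetical helper, not part of the
 * heap API): the field ordering described at the top of this file
 * means an overflow past the end of an allocated buffer clobbers the
 * right neighbor's LEFT_SIZE, which a validator could detect with a
 * check along these lines.
 */
static inline bool chunk_bounds_intact(struct z_heap *h, chunkid_t c)
{
	return left_chunk(h, right_chunk(h, c)) == c;
}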

static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,
				       chunksz_t size)
{
	chunk_set(h, c, LEFT_SIZE, size);
}

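/* On a big heap all four header fields are 32 bit, so a minimum-size
 * (one unit, 8 byte) free chunk has room only for LEFT_SIZE and
 * SIZE_AND_USED and cannot carry the free list links; such a chunk is
 * a "solo" free header.
 */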
static inline bool solo_free_header(struct z_heap *h, chunkid_t c)
{
	return big_heap(h) && chunk_size(h, c) == 1U;
}

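/* Bytes of header in front of an allocated chunk's payload: just the
 * LEFT_SIZE and SIZE_AND_USED fields, i.e. two 32 or 16 bit values.
 * heap_footer_bytes() yields the same 8-or-4 byte figure, computed
 * from the heap's total byte size rather than from a struct z_heap.
 */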
static inline size_t chunk_header_bytes(struct z_heap *h)
{
	return big_heap(h) ? 8 : 4;
}

static inline size_t heap_footer_bytes(size_t size)
{
	return big_heap_bytes(size) ? 8 : 4;
}

static inline chunksz_t chunksz(size_t bytes)
{
	return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
}

static inline chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
{
	return chunksz(chunk_header_bytes(h) + bytes);
}

static inline chunksz_t min_chunk_size(struct z_heap *h)
{
	return bytes_to_chunksz(h, 1);
}

static inline size_t chunksz_to_bytes(struct z_heap *h, chunksz_t chunksz_in)
{
	return chunksz_in * CHUNK_UNIT - chunk_header_bytes(h);
}
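
/* Example: on a small heap (4 byte chunk header), an allocation of
 * 100 bytes needs bytes_to_chunksz(h, 100) = chunksz(4 + 100) = 13
 * units, and chunksz_to_bytes(h, 13) gives back 13 * 8 - 4 = 100
 * usable bytes. On a big heap the same request rounds up to 14 units.
 */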

/* Maps a chunk size to its power-of-two free list bucket: bucket n
 * covers 2^n consecutive sizes starting at min_chunk_size(h), so e.g.
 * with min_chunk_size(h) == 1, size 1 maps to bucket 0, sizes 2-3 to
 * bucket 1, sizes 4-7 to bucket 2, and so on.
 */
static inline int bucket_idx(struct z_heap *h, chunksz_t sz)
{
	unsigned int usable_sz = sz - min_chunk_size(h) + 1;
	return 31 - __builtin_clz(usable_sz);
}

static inline bool size_too_big(struct z_heap *h, size_t bytes)
{
	/*
	 * Quick check to bail out early if size is too big.
	 * Also guards against potential arithmetic overflows elsewhere.
	 */
	return (bytes / CHUNK_UNIT) >= h->end_chunk;
}

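/* Walks every chunk from the first real chunk up to end_chunk and
 * accumulates the usable payload bytes of allocated and of free
 * chunks. Solo free headers carry no usable payload and are skipped.
 */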
static inline void get_alloc_info(struct z_heap *h, size_t *alloc_bytes,
				  size_t *free_bytes)
{
	chunkid_t c;

	*alloc_bytes = 0;
	*free_bytes = 0;

	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (chunk_used(h, c)) {
			*alloc_bytes += chunksz_to_bytes(h, chunk_size(h, c));
		} else if (!solo_free_header(h, c)) {
			*free_bytes += chunksz_to_bytes(h, chunk_size(h, c));
		}
	}
}

#endif /* ZEPHYR_INCLUDE_LIB_OS_HEAP_H_ */