1 /*
2 * Copyright (c) 2019 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #ifndef ZEPHYR_INCLUDE_LIB_OS_HEAP_H_
7 #define ZEPHYR_INCLUDE_LIB_OS_HEAP_H_
8
9 /*
10 * Internal heap APIs
11 */
12
/* These validation checks are non-trivially expensive, so enable
 * only when debugging the heap code. They shouldn't be routine
 * assertions.
 */
#ifdef CONFIG_SYS_HEAP_VALIDATE
#define CHECK(x) __ASSERT(x, "")
#else
#define CHECK(x) /**/
#endif
22
23 /* Chunks are identified by their offset in 8 byte units from the
24 * first address in the buffer (a zero-valued chunkid_t is used as a
25 * null; that chunk would always point into the metadata at the start
26 * of the heap and cannot be allocated). They are prefixed by a
27 * variable size header that depends on the size of the heap. Heaps
 * with fewer than 2^15 units (256KiB) of storage use shorts to store
 * the fields, otherwise the units are 32 bit integers for a 16GiB heap
30 * space (larger spaces really aren't in scope for this code, but
31 * could be handled similarly I suppose). Because of that design
32 * there's a certain amount of boilerplate API needed to expose the
33 * field accessors since we can't use natural syntax.
34 *
35 * The fields are:
36 * LEFT_SIZE: The size of the left (next lower chunk in memory)
37 * neighbor chunk.
38 * SIZE_AND_USED: the total size (including header) of the chunk in
39 * 8-byte units. The bottom bit stores a "used" flag.
40 * FREE_PREV: Chunk ID of the previous node in a free list.
41 * FREE_NEXT: Chunk ID of the next node in a free list.
42 *
43 * The free lists are circular lists, one for each power-of-two size
44 * category. The free list pointers exist only for free chunks,
45 * obviously. This memory is part of the user's buffer when
46 * allocated.
47 *
48 * The field order is so that allocated buffers are immediately bounded
49 * by SIZE_AND_USED of the current chunk at the bottom, and LEFT_SIZE of
50 * the following chunk at the top. This ordering allows for quick buffer
51 * overflow detection by testing left_chunk(c + chunk_size(c)) == c.
52 */
53
/* Indices of the per-chunk header fields described above */
enum chunk_fields { LEFT_SIZE, SIZE_AND_USED, FREE_PREV, FREE_NEXT };

/* Granularity of the heap: all sizes and offsets are in 8-byte units */
#define CHUNK_UNIT 8U

/* One opaque storage unit of the heap buffer */
typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;

/* big_heap needs uint32_t, small_heap needs uint16_t */
typedef uint32_t chunkid_t;
typedef uint32_t chunksz_t;

/* Head of one free list (one list per power-of-two size category) */
struct z_heap_bucket {
	chunkid_t next;
};

struct z_heap {
	chunkid_t chunk0_hdr[2];  /* backing store for chunk 0's header */
	chunkid_t end_chunk;      /* chunk ID one past the heap's last chunk */
	uint32_t avail_buckets;   /* presumably a bitmask of non-empty
				   * buckets — confirm against heap.c
				   */
	/* C99 flexible array member: a [0]-sized array is a GNU
	 * extension and undefined behavior in standard C; [] has the
	 * same layout (doesn't count toward sizeof) and is portable.
	 */
	struct z_heap_bucket buckets[];
};
74
static inline bool big_heap_chunks(chunksz_t chunks)
{
	/* 64-bit targets always use the "big" (32-bit field) layout;
	 * 32-bit targets switch to it once the chunk count no longer
	 * fits in 15 bits.
	 */
	if (sizeof(void *) > 4U) {
		return true;
	}

	return chunks > 0x7fffU;
}
79
big_heap_bytes(size_t bytes)80 static inline bool big_heap_bytes(size_t bytes)
81 {
82 return big_heap_chunks(bytes / CHUNK_UNIT);
83 }
84
big_heap(struct z_heap * h)85 static inline bool big_heap(struct z_heap *h)
86 {
87 return big_heap_chunks(h->end_chunk);
88 }
89
chunk_buf(struct z_heap * h)90 static inline chunk_unit_t *chunk_buf(struct z_heap *h)
91 {
92 /* the struct z_heap matches with the first chunk */
93 return (chunk_unit_t *)h;
94 }
95
static inline chunkid_t chunk_field(struct z_heap *h, chunkid_t c,
				    enum chunk_fields f)
{
	/* Read header field f of chunk c, using the field width that
	 * matches this heap's layout (32-bit for big, 16-bit for small).
	 */
	void *hdr = &chunk_buf(h)[c];

	return big_heap(h) ? ((uint32_t *)hdr)[f] : ((uint16_t *)hdr)[f];
}
108
static inline void chunk_set(struct z_heap *h, chunkid_t c,
			     enum chunk_fields f, chunkid_t val)
{
	/* Write header field f of chunk c; val must fit the field
	 * width of this heap's layout.
	 */
	CHECK(c <= h->end_chunk);

	void *hdr = &chunk_buf(h)[c];

	if (big_heap(h)) {
		CHECK(val == (uint32_t)val);
		((uint32_t *)hdr)[f] = val;
	} else {
		CHECK(val == (uint16_t)val);
		((uint16_t *)hdr)[f] = val;
	}
}
125
static inline bool chunk_used(struct z_heap *h, chunkid_t c)
{
	/* The low bit of SIZE_AND_USED is the "used" flag */
	return (chunk_field(h, c, SIZE_AND_USED) & 1U) != 0U;
}
130
chunk_size(struct z_heap * h,chunkid_t c)131 static inline chunksz_t chunk_size(struct z_heap *h, chunkid_t c)
132 {
133 return chunk_field(h, c, SIZE_AND_USED) >> 1;
134 }
135
static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
{
	/* Flip only the "used" bit of SIZE_AND_USED, leaving the
	 * size bits intact, at the field width of this heap's layout.
	 */
	void *hdr = &chunk_buf(h)[c];

	if (big_heap(h)) {
		uint32_t *sz = &((uint32_t *)hdr)[SIZE_AND_USED];

		*sz = used ? (*sz | 1U) : (*sz & ~1U);
	} else {
		uint16_t *sz = &((uint16_t *)hdr)[SIZE_AND_USED];

		*sz = used ? (*sz | 1U) : (*sz & ~1U);
	}
}
155
156 /*
157 * Note: no need to preserve the used bit here as the chunk is never in use
158 * when its size is modified, and potential set_chunk_used() is always
159 * invoked after set_chunk_size().
160 */
set_chunk_size(struct z_heap * h,chunkid_t c,chunksz_t size)161 static inline void set_chunk_size(struct z_heap *h, chunkid_t c, chunksz_t size)
162 {
163 chunk_set(h, c, SIZE_AND_USED, size << 1);
164 }
165
static inline chunkid_t prev_free_chunk(struct z_heap *h, chunkid_t c)
{
	/* Predecessor of c on its circular free list */
	return chunk_field(h, c, FREE_PREV);
}
170
static inline chunkid_t next_free_chunk(struct z_heap *h, chunkid_t c)
{
	/* Successor of c on its circular free list */
	return chunk_field(h, c, FREE_NEXT);
}
175
static inline void set_prev_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t prev)
{
	/* Link prev as c's predecessor on its free list */
	chunk_set(h, c, FREE_PREV, prev);
}
181
static inline void set_next_free_chunk(struct z_heap *h, chunkid_t c,
				       chunkid_t next)
{
	/* Link next as c's successor on its free list */
	chunk_set(h, c, FREE_NEXT, next);
}
187
static inline chunkid_t left_chunk(struct z_heap *h, chunkid_t c)
{
	/* LEFT_SIZE holds the size of the next-lower neighbor, so
	 * stepping back by it yields that neighbor's chunk ID.
	 */
	return c - chunk_field(h, c, LEFT_SIZE);
}
192
static inline chunkid_t right_chunk(struct z_heap *h, chunkid_t c)
{
	/* The next-higher neighbor starts immediately past c */
	return c + chunk_size(h, c);
}
197
static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,
				       chunksz_t size)
{
	/* Record the lower neighbor's size in c's header */
	chunk_set(h, c, LEFT_SIZE, size);
}
203
static inline bool solo_free_header(struct z_heap *h, chunkid_t c)
{
	/* On a big heap a one-unit (8-byte) chunk is all header, with
	 * no room for free-list links.
	 */
	return big_heap(h) && chunk_size(h, c) == 1U;
}
208
static inline size_t chunk_header_bytes(struct z_heap *h)
{
	/* Two fields (LEFT_SIZE, SIZE_AND_USED) of 4 or 2 bytes each */
	return big_heap(h) ? 2U * sizeof(uint32_t) : 2U * sizeof(uint16_t);
}
213
static inline size_t heap_footer_bytes(size_t size)
{
	/* Mirrors chunk_header_bytes() (8 or 4) for a heap of the
	 * given byte size.
	 */
	if (big_heap_bytes(size)) {
		return 8;
	}

	return 4;
}
218
chunksz(size_t bytes)219 static inline chunksz_t chunksz(size_t bytes)
220 {
221 return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
222 }
223
bytes_to_chunksz(struct z_heap * h,size_t bytes)224 static inline chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
225 {
226 return chunksz(chunk_header_bytes(h) + bytes);
227 }
228
min_chunk_size(struct z_heap * h)229 static inline chunksz_t min_chunk_size(struct z_heap *h)
230 {
231 return bytes_to_chunksz(h, 1);
232 }
233
static inline size_t chunksz_to_bytes(struct z_heap *h, chunksz_t chunksz_in)
{
	/* Usable payload bytes of a chunk of the given size */
	return chunksz_in * CHUNK_UNIT - chunk_header_bytes(h);
}
238
static inline int bucket_idx(struct z_heap *h, chunksz_t sz)
{
	/* Bucket index is floor(log2) of the size in excess of the
	 * minimum. NOTE(review): assumes sz >= min_chunk_size(h),
	 * since __builtin_clz(0) is undefined — confirm at callers.
	 */
	unsigned int usable = sz - min_chunk_size(h) + 1U;

	return 31 - __builtin_clz(usable);
}
244
size_too_big(struct z_heap * h,size_t bytes)245 static inline bool size_too_big(struct z_heap *h, size_t bytes)
246 {
247 /*
248 * Quick check to bail out early if size is too big.
249 * Also guards against potential arithmetic overflows elsewhere.
250 */
251 return (bytes / CHUNK_UNIT) >= h->end_chunk;
252 }
253
/* For debugging: prints a report of the heap's state; dump_chunks
 * presumably adds per-chunk detail — implementation not visible here.
 */
void heap_print_info(struct z_heap *h, bool dump_chunks);
256
257 #endif /* ZEPHYR_INCLUDE_LIB_OS_HEAP_H_ */
258