/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/sys/sys_heap.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include "heap.h"

/* White-box sys_heap validation code. Uses internal data structures.
 * Not expected to be useful in production apps. This checks every
 * header field of every chunk and returns true if the totality of the
 * data structure is a valid heap. It doesn't necessarily tell you
 * that it is the CORRECT heap given the history of alloc/free calls
 * that it can't inspect. In a pathological case, you can imagine
 * something scribbling a copy of a previously-valid heap on top of a
 * running one and corrupting it. YMMV.
 */
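
/* As a rough usage sketch (buffer size and assertion macro are
 * illustrative, not requirements), a test might interleave calls to
 * sys_heap_validate() with its allocations to catch corruption early:
 *
 *	struct sys_heap heap;
 *	static char buf[2048];
 *
 *	sys_heap_init(&heap, buf, sizeof(buf));
 *	void *p = sys_heap_alloc(&heap, 40);
 *
 *	__ASSERT(sys_heap_validate(&heap), "heap corrupted");
 *	sys_heap_free(&heap, p);
 *	__ASSERT(sys_heap_validate(&heap), "heap corrupted");
 */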

#define VALIDATE(cond) do { if (!(cond)) { return false; } } while (0)

static bool in_bounds(struct z_heap *h, chunkid_t c)
{
	VALIDATE(c >= right_chunk(h, 0));
	VALIDATE(c < h->end_chunk);
	VALIDATE(chunk_size(h, c) < h->end_chunk);
	return true;
}

static bool valid_chunk(struct z_heap *h, chunkid_t c)
{
	VALIDATE(chunk_size(h, c) > 0);
	VALIDATE(c + chunk_size(h, c) <= h->end_chunk);
	VALIDATE(in_bounds(h, c));
	VALIDATE(right_chunk(h, left_chunk(h, c)) == c);
	VALIDATE(left_chunk(h, right_chunk(h, c)) == c);
	if (chunk_used(h, c)) {
		VALIDATE(!solo_free_header(h, c));
	} else {
		VALIDATE(chunk_used(h, left_chunk(h, c)));
		VALIDATE(chunk_used(h, right_chunk(h, c)));
		if (!solo_free_header(h, c)) {
			VALIDATE(in_bounds(h, prev_free_chunk(h, c)));
			VALIDATE(in_bounds(h, next_free_chunk(h, c)));
		}
	}
	return true;
}

/* Validate multiple state dimensions for the bucket "next" pointer
 * and see that they match. Probably should unify the design a
 * bit...
 */
static inline void check_nexts(struct z_heap *h, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	bool emptybit = (h->avail_buckets & BIT(bidx)) == 0;
	bool emptylist = b->next == 0;
	bool empties_match = emptybit == emptylist;

	(void)empties_match;
	CHECK(empties_match);

	if (b->next != 0) {
		CHECK(valid_chunk(h, b->next));
	}
}

static void get_alloc_info(struct z_heap *h, size_t *alloc_bytes,
			   size_t *free_bytes)
{
	chunkid_t c;

	*alloc_bytes = 0;
	*free_bytes = 0;

	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (chunk_used(h, c)) {
			*alloc_bytes += chunksz_to_bytes(h, chunk_size(h, c));
		} else if (!solo_free_header(h, c)) {
			*free_bytes += chunksz_to_bytes(h, chunk_size(h, c));
		}
	}
}

bool sys_heap_validate(struct sys_heap *heap)
{
	struct z_heap *h = heap->heap;
	chunkid_t c;

	/*
	 * Walk through the chunks linearly, verifying sizes and end pointer.
	 */
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (!valid_chunk(h, c)) {
			return false;
		}
	}
	if (c != h->end_chunk) {
		return false; /* Should have exactly consumed the buffer */
	}

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	/*
	 * Validate sys_heap_runtime_stats_get API.
	 * Iterate all chunks in sys_heap to get total allocated bytes and
	 * free bytes, then compare with the results of
	 * sys_heap_runtime_stats_get function.
	 */
	size_t allocated_bytes, free_bytes;
	struct sys_memory_stats stat;

	get_alloc_info(h, &allocated_bytes, &free_bytes);
	sys_heap_runtime_stats_get(heap, &stat);
	if ((stat.allocated_bytes != allocated_bytes) ||
	    (stat.free_bytes != free_bytes)) {
		return false;
	}
#endif

	/* Check the free lists: entry count should match, empty bit
	 * should be correct, and all chunk entries should point into
	 * valid unused chunks. Mark those chunks USED, temporarily.
	 */
	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
		chunkid_t c0 = h->buckets[b].next;
		uint32_t n = 0;

		check_nexts(h, b);

		for (c = c0; c != 0 && (n == 0 || c != c0);
		     n++, c = next_free_chunk(h, c)) {
			if (!valid_chunk(h, c)) {
				return false;
			}
			set_chunk_used(h, c, true);
		}

		bool empty = (h->avail_buckets & BIT(b)) == 0;
		bool zero = n == 0;

		if (empty != zero) {
			return false;
		}

		if (empty && h->buckets[b].next != 0) {
			return false;
		}
	}

	/*
	 * Walk through the chunks linearly again, verifying that all chunks
	 * but solo headers are now USED (i.e. all free blocks were found
	 * during enumeration). Mark all such blocks UNUSED and solo headers
	 * USED.
	 */
	chunkid_t prev_chunk = 0;
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (!chunk_used(h, c) && !solo_free_header(h, c)) {
			return false;
		}
		if (left_chunk(h, c) != prev_chunk) {
			return false;
		}
		prev_chunk = c;

		set_chunk_used(h, c, solo_free_header(h, c));
	}
	if (c != h->end_chunk) {
		return false; /* Should have exactly consumed the buffer */
	}

	/* Go through the free lists again checking that the linear
	 * pass caught all the blocks and that they now show UNUSED.
	 * Mark them USED.
	 */
	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
		chunkid_t c0 = h->buckets[b].next;
		int n = 0;

		if (c0 == 0) {
			continue;
		}

		for (c = c0; n == 0 || c != c0; n++, c = next_free_chunk(h, c)) {
			if (chunk_used(h, c)) {
				return false;
			}
			set_chunk_used(h, c, true);
		}
	}

	/* Now we are valid, but have managed to invert all the in-use
	 * fields. One more linear pass to fix them up
	 */
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		set_chunk_used(h, c, !chunk_used(h, c));
	}
	return true;
}

struct z_heap_stress_rec {
	void *(*alloc_fn)(void *arg, size_t bytes);
	void (*free_fn)(void *arg, void *p);
	void *arg;
	size_t total_bytes;
	struct z_heap_stress_block *blocks;
	size_t nblocks;
	size_t blocks_alloced;
	size_t bytes_alloced;
	uint32_t target_percent;
};

struct z_heap_stress_block {
	void *ptr;
	size_t sz;
};

/* Very simple LCRNG (from https://nuclear.llnl.gov/CNP/rng/rngman/node4.html)
 *
 * Here to guarantee cross-platform test repeatability.
 */
static uint32_t rand32(void)
{
	static uint64_t state = 123456789; /* seed */

	state = state * 2862933555777941757UL + 3037000493UL;

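	/* Return the high half of the 64-bit state: for a
	 * power-of-two-modulus LCG the low-order bits have very short
	 * periods, so the upper 32 bits are the better-distributed
	 * ones.
	 */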
	return (uint32_t)(state >> 32);
}

static bool rand_alloc_choice(struct z_heap_stress_rec *sr)
{
	/* Edge cases: no blocks allocated, and no space for a new one */
	if (sr->blocks_alloced == 0) {
		return true;
	} else if (sr->blocks_alloced >= sr->nblocks) {
		return false;
	} else {

		/* The way this works is to scale the chance of choosing to
		 * allocate vs. free such that it's even odds when the heap is
		 * at the target percent, with linear tapering on the low
		 * slope (i.e. we choose to always allocate with an empty
		 * heap, allocate 50% of the time when the heap is exactly at
		 * the target, and always free when above the target). In
		 * practice, the operations aren't quite symmetric (you can
		 * always free, but your allocation might fail), and the units
		 * aren't matched (we're doing math based on bytes allocated
		 * and ignoring the overhead) but this is close enough. And
		 * yes, the math here is coarse (in units of percent), but
		 * that's good enough and fits well inside 32 bit quantities.
		 * (Note precision issue when heap size is above 40MB
		 * though!).
		 */
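		/* Worked example with illustrative numbers: for
		 * target_percent = 50 and a heap that is 25% full,
		 * free_chance below comes out near 0x40000000, so
		 * rand32() exceeds it (and we allocate) about 75% of
		 * the time.  Approaching 50% full the threshold nears
		 * 0x80000000 (even odds); at or above the target,
		 * free_chance stays at 0xffffffff and we always free.
		 */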
		__ASSERT(sr->total_bytes < 0xffffffffU / 100, "too big for u32!");
		uint32_t full_pct = (100 * sr->bytes_alloced) / sr->total_bytes;
		uint32_t target = sr->target_percent ? sr->target_percent : 1;
		uint32_t free_chance = 0xffffffffU;

		if (full_pct < sr->target_percent) {
			free_chance = full_pct * (0x80000000U / target);
		}

		return rand32() > free_chance;
	}
}

/* Chooses a size of block to allocate, logarithmically favoring
 * smaller blocks (i.e. blocks twice as large are half as frequent).
 */
static size_t rand_alloc_size(struct z_heap_stress_rec *sr)
{
	ARG_UNUSED(sr);

	/* Min scale of 4 means that half of the requests fall in the
	 * smallest size class, with an average size of 8
	 */
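	/* Sketch of the resulting distribution: __builtin_clz() of a
	 * uniform 32-bit value is k with probability 2^-(k+1), so the
	 * size cap of 2^scale - 1 below doubles in size with halving
	 * probability at each step, giving the logarithmic bias
	 * described above.
	 */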
	int scale = 4 + __builtin_clz(rand32());

	return rand32() & BIT_MASK(scale);
}

/* Returns the index of a randomly chosen block to free */
static size_t rand_free_choice(struct z_heap_stress_rec *sr)
{
	return rand32() % sr->blocks_alloced;
}

/* General purpose heap stress test. Takes function pointers to allow
 * for testing multiple heap APIs with the same rig. The alloc and
 * free functions are passed back the argument as a context pointer.
 * The total_bytes argument should reflect the size of the heap being
 * tested. The scratch array is used to store temporary state and
 * should be sized about half as large as the heap itself. Operation
 * counts and accumulated usage are reported via the result struct.
 */
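/* A minimal caller sketch (the wrapper names, heap object and buffer
 * sizes here are illustrative only):
 *
 *	static void *stress_alloc(void *arg, size_t bytes)
 *	{
 *		return sys_heap_alloc(arg, bytes);
 *	}
 *
 *	static void stress_free(void *arg, void *p)
 *	{
 *		sys_heap_free(arg, p);
 *	}
 *
 *	struct z_heap_stress_result result;
 *
 *	sys_heap_stress(stress_alloc, stress_free, &heap, sizeof(heapmem),
 *			100000, scratch, sizeof(scratch), 50, &result);
 */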
void sys_heap_stress(void *(*alloc_fn)(void *arg, size_t bytes),
		     void (*free_fn)(void *arg, void *p),
		     void *arg, size_t total_bytes,
		     uint32_t op_count,
		     void *scratch_mem, size_t scratch_bytes,
		     int target_percent,
		     struct z_heap_stress_result *result)
{
	struct z_heap_stress_rec sr = {
		.alloc_fn = alloc_fn,
		.free_fn = free_fn,
		.arg = arg,
		.total_bytes = total_bytes,
		.blocks = scratch_mem,
		.nblocks = scratch_bytes / sizeof(struct z_heap_stress_block),
		.target_percent = target_percent,
	};

	*result = (struct z_heap_stress_result) {0};

	for (uint32_t i = 0; i < op_count; i++) {
		if (rand_alloc_choice(&sr)) {
			size_t sz = rand_alloc_size(&sr);
			void *p = sr.alloc_fn(sr.arg, sz);

			result->total_allocs++;
			if (p != NULL) {
				result->successful_allocs++;
				sr.blocks[sr.blocks_alloced].ptr = p;
				sr.blocks[sr.blocks_alloced].sz = sz;
				sr.blocks_alloced++;
				sr.bytes_alloced += sz;
			}
		} else {
			int b = rand_free_choice(&sr);
			void *p = sr.blocks[b].ptr;
			size_t sz = sr.blocks[b].sz;

			result->total_frees++;
			sr.blocks[b] = sr.blocks[sr.blocks_alloced - 1];
			sr.blocks_alloced--;
			sr.bytes_alloced -= sz;
			sr.free_fn(sr.arg, p);
		}
		result->accumulated_in_use_bytes += sr.bytes_alloced;
	}
}

/*
 * Print heap info for debugging / analysis purpose
 */
void heap_print_info(struct z_heap *h, bool dump_chunks)
{
	int i, nb_buckets = bucket_idx(h, h->end_chunk) + 1;
	size_t free_bytes, allocated_bytes, total, overhead;

	printk("Heap at %p contains %d units in %d buckets\n\n",
	       chunk_buf(h), h->end_chunk, nb_buckets);

	printk("  bucket#    min units        total      largest      largest\n"
	       "             threshold       chunks      (units)      (bytes)\n"
	       "  -----------------------------------------------------------\n");
	for (i = 0; i < nb_buckets; i++) {
		chunkid_t first = h->buckets[i].next;
		chunksz_t largest = 0;
		int count = 0;

		if (first) {
			chunkid_t curr = first;
			do {
				count++;
				largest = MAX(largest, chunk_size(h, curr));
				curr = next_free_chunk(h, curr);
			} while (curr != first);
		}
		if (count) {
			printk("%9d %12d %12d %12d %12zd\n",
			       i, (1 << i) - 1 + min_chunk_size(h), count,
			       largest, chunksz_to_bytes(h, largest));
		}
	}

	if (dump_chunks) {
		printk("\nChunk dump:\n");
		for (chunkid_t c = 0; ; c = right_chunk(h, c)) {
			printk("chunk %4d: [%c] size=%-4d left=%-4d right=%d\n",
			       c,
			       chunk_used(h, c) ? '*'
			       : solo_free_header(h, c) ? '.'
			       : '-',
			       chunk_size(h, c),
			       left_chunk(h, c),
			       right_chunk(h, c));
			if (c == h->end_chunk) {
				break;
			}
		}
	}

	get_alloc_info(h, &allocated_bytes, &free_bytes);
	/* The end marker chunk has a header. It is part of the overhead. */
	total = h->end_chunk * CHUNK_UNIT + chunk_header_bytes(h);
	overhead = total - free_bytes - allocated_bytes;
	printk("\n%zd free bytes, %zd allocated bytes, overhead = %zd bytes (%zd.%zd%%)\n",
	       free_bytes, allocated_bytes, overhead,
	       (1000 * overhead + total/2) / total / 10,
	       (1000 * overhead + total/2) / total % 10);
}

void sys_heap_print_info(struct sys_heap *heap, bool dump_chunks)
{
	heap_print_info(heap->heap, dump_chunks);
}

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS

int sys_heap_runtime_stats_get(struct sys_heap *heap,
			       struct sys_memory_stats *stats)
{
	if ((heap == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

	stats->free_bytes = heap->heap->free_bytes;
	stats->allocated_bytes = heap->heap->allocated_bytes;
	stats->max_allocated_bytes = heap->heap->max_allocated_bytes;

	return 0;
}

int sys_heap_runtime_stats_reset_max(struct sys_heap *heap)
{
	if (heap == NULL) {
		return -EINVAL;
	}

	heap->heap->max_allocated_bytes = heap->heap->allocated_bytes;

	return 0;
}

#endif