/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/sys/sys_heap.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include "heap.h"

/* White-box sys_heap validation code.  Uses internal data structures.
 * Not expected to be useful in production apps.  This checks every
 * header field of every chunk and returns true if the totality of the
 * data structure is a valid heap.  It doesn't necessarily tell you
 * that it is the CORRECT heap given the history of alloc/free calls
 * that it can't inspect.  In a pathological case, you can imagine
 * something scribbling a copy of a previously-valid heap on top of a
 * running one and corrupting it. YMMV.
 */
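/* Illustrative usage sketch only, not part of the validation logic.
 * The test function, backing buffer, and sizes below are hypothetical;
 * the point is simply that a white-box test can interleave calls to
 * the public sys_heap API with sys_heap_validate():
 *
 *	static uint64_t backing[256];
 *
 *	bool heap_validate_smoke(void)
 *	{
 *		struct sys_heap heap;
 *		void *p;
 *
 *		sys_heap_init(&heap, backing, sizeof(backing));
 *
 *		p = sys_heap_alloc(&heap, 40);
 *		if (!sys_heap_validate(&heap)) {
 *			return false;
 *		}
 *
 *		sys_heap_free(&heap, p);
 *		return sys_heap_validate(&heap);
 *	}
 */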

#define VALIDATE(cond) do { if (!(cond)) { return false; } } while (0)

static bool in_bounds(struct z_heap *h, chunkid_t c)
{
	VALIDATE(c >= right_chunk(h, 0));
	VALIDATE(c < h->end_chunk);
	VALIDATE(chunk_size(h, c) < h->end_chunk);
	return true;
}

static bool valid_chunk(struct z_heap *h, chunkid_t c)
{
	VALIDATE(chunk_size(h, c) > 0);
	VALIDATE(c + chunk_size(h, c) <= h->end_chunk);
	VALIDATE(in_bounds(h, c));
	VALIDATE(right_chunk(h, left_chunk(h, c)) == c);
	VALIDATE(left_chunk(h, right_chunk(h, c)) == c);
	if (chunk_used(h, c)) {
		VALIDATE(!solo_free_header(h, c));
	} else {
		VALIDATE(chunk_used(h, left_chunk(h, c)));
		VALIDATE(chunk_used(h, right_chunk(h, c)));
		if (!solo_free_header(h, c)) {
			VALIDATE(in_bounds(h, prev_free_chunk(h, c)));
			VALIDATE(in_bounds(h, next_free_chunk(h, c)));
		}
	}
	return true;
}

/* Validate multiple state dimensions for the bucket "next" pointer
 * and see that they match.  Probably should unify the design a
 * bit...
 */
static inline void check_nexts(struct z_heap *h, int bidx)
{
	struct z_heap_bucket *b = &h->buckets[bidx];

	bool emptybit = (h->avail_buckets & BIT(bidx)) == 0;
	bool emptylist = b->next == 0;
	bool empties_match = emptybit == emptylist;

	(void)empties_match;
	CHECK(empties_match);

	if (b->next != 0) {
		CHECK(valid_chunk(h, b->next));
	}
}

bool sys_heap_validate(struct sys_heap *heap)
{
	struct z_heap *h = heap->heap;
	chunkid_t c;

	/*
	 * Walk through the chunks linearly, verifying sizes and end pointer.
	 */
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (!valid_chunk(h, c)) {
			return false;
		}
	}
	if (c != h->end_chunk) {
		return false;  /* Should have exactly consumed the buffer */
	}

#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
	/*
	 * Validate the sys_heap_runtime_stats_get() API.
	 * Iterate all chunks in the sys_heap to get the total allocated
	 * bytes and free bytes, then compare with the results of
	 * sys_heap_runtime_stats_get().
	 */
	size_t allocated_bytes, free_bytes;
	struct sys_memory_stats stat;

	get_alloc_info(h, &allocated_bytes, &free_bytes);
	sys_heap_runtime_stats_get(heap, &stat);
	if ((stat.allocated_bytes != allocated_bytes) ||
	    (stat.free_bytes != free_bytes)) {
		return false;
	}
#endif

	/* Check the free lists: entry count should match, empty bit
	 * should be correct, and all chunk entries should point into
	 * valid unused chunks.  Mark those chunks USED, temporarily.
	 */
	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
		chunkid_t c0 = h->buckets[b].next;
		uint32_t n = 0;

		check_nexts(h, b);

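		/* Each bucket's free list is circular: walk it exactly once,
		 * stopping when the cursor wraps back around to the head c0
		 * (the n == 0 test lets the first iteration through).
		 */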
		for (c = c0; c != 0 && (n == 0 || c != c0);
		     n++, c = next_free_chunk(h, c)) {
			if (!valid_chunk(h, c)) {
				return false;
			}
			set_chunk_used(h, c, true);
		}

		bool empty = (h->avail_buckets & BIT(b)) == 0;
		bool zero = n == 0;

		if (empty != zero) {
			return false;
		}

		if (empty && h->buckets[b].next != 0) {
			return false;
		}
	}

	/*
	 * Walk through the chunks linearly again, verifying that all chunks
	 * but solo headers are now USED (i.e. all free blocks were found
	 * during enumeration).  Mark all such blocks UNUSED and solo headers
	 * USED.
	 */
	chunkid_t prev_chunk = 0;

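	/* prev_chunk starts at chunk 0 (the reserved prefix chunk holding
	 * the heap's own bookkeeping), so the leftmost real chunk must link
	 * back to it; after that every chunk's left-neighbor link must name
	 * the chunk just visited.
	 */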
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		if (!chunk_used(h, c) && !solo_free_header(h, c)) {
			return false;
		}
		if (left_chunk(h, c) != prev_chunk) {
			return false;
		}
		prev_chunk = c;

		set_chunk_used(h, c, solo_free_header(h, c));
	}
	if (c != h->end_chunk) {
		return false;  /* Should have exactly consumed the buffer */
	}

	/* Go through the free lists again checking that the linear
	 * pass caught all the blocks and that they now show UNUSED.
	 * Mark them USED.
	 */
	for (int b = 0; b <= bucket_idx(h, h->end_chunk); b++) {
		chunkid_t c0 = h->buckets[b].next;
		int n = 0;

		if (c0 == 0) {
			continue;
		}

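		/* Same circular-list traversal as in the first free list
		 * pass: visit each entry exactly once, terminating when c
		 * wraps back around to the head c0.
		 */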
		for (c = c0; n == 0 || c != c0; n++, c = next_free_chunk(h, c)) {
			if (chunk_used(h, c)) {
				return false;
			}
			set_chunk_used(h, c, true);
		}
	}

	/* Now we are valid, but have managed to invert all the in-use
	 * fields.  One more linear pass to fix them up.
	 */
	for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
		set_chunk_used(h, c, !chunk_used(h, c));
	}
	return true;
}