/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/heap_listener.h>
#include <zephyr/sys/mem_blocks.h>
#include <zephyr/sys/util.h>

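/*
 * Find a run of @a num_blocks unallocated blocks in the bitmap, mark
 * them allocated, and return the address of the first block, or NULL
 * if no large-enough run is free. When runtime stats are enabled, the
 * used/max-used counters are updated under the stats spinlock.
 */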
static void *alloc_blocks(sys_mem_blocks_t *mem_block, size_t num_blocks)
{
	size_t offset;
	int r;
	uint8_t *blk;
	void *ret = NULL;

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	k_spinlock_key_t key = k_spin_lock(&mem_block->lock);
#endif

	/* Find an unallocated block */
	r = sys_bitarray_alloc(mem_block->bitmap, num_blocks, &offset);
	if (r == 0) {
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
		mem_block->used_blocks += (uint32_t)num_blocks;

		if (mem_block->max_used_blocks < mem_block->used_blocks) {
			mem_block->max_used_blocks = mem_block->used_blocks;
		}

		k_spin_unlock(&mem_block->lock, key);
#endif

		/* Calculate the start address of the newly allocated block */
		blk = mem_block->buffer + (offset << mem_block->blk_sz_shift);

		ret = blk;
	}
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	else {
		/*
		 * Allocation failed: the stats lock must be released on
		 * this path too, otherwise it would be held forever.
		 */
		k_spin_unlock(&mem_block->lock, key);
	}
#endif

	return ret;
}

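/*
 * Return @a num_blocks blocks starting at @a ptr to the allocator.
 * Returns -EFAULT if @a ptr falls outside the backing buffer, or an
 * error code from sys_bitarray_free().
 */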
static int free_blocks(sys_mem_blocks_t *mem_block, void *ptr, size_t num_blocks)
{
	size_t offset;
	uint8_t *blk = ptr;
	int ret = 0;

	/* Make sure incoming block is within the mem_block buffer */
	if (blk < mem_block->buffer) {
		ret = -EFAULT;
		goto out;
	}

	offset = (blk - mem_block->buffer) >> mem_block->blk_sz_shift;
	if (offset >= mem_block->num_blocks) {
		ret = -EFAULT;
		goto out;
	}

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	k_spinlock_key_t key = k_spin_lock(&mem_block->lock);
#endif
	ret = sys_bitarray_free(mem_block->bitmap, num_blocks, offset);

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	if (ret == 0) {
		mem_block->used_blocks -= (uint32_t)num_blocks;
	}

	k_spin_unlock(&mem_block->lock, key);
#endif

out:
	return ret;
}

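/*
 * Allocate @a count contiguous blocks and store the address of the
 * first one in @a out_block.
 */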
int sys_mem_blocks_alloc_contiguous(sys_mem_blocks_t *mem_block, size_t count,
				    void **out_block)
{
	int ret = 0;

	__ASSERT_NO_MSG(mem_block != NULL);
	__ASSERT_NO_MSG(out_block != NULL);

	if (count == 0) {
		/* Nothing to allocate */
		*out_block = NULL;
		goto out;
	}

	if (count > mem_block->num_blocks) {
		/* Definitely not enough blocks to be allocated */
		ret = -ENOMEM;
		goto out;
	}

	void *ptr = alloc_blocks(mem_block, count);

	if (ptr == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	*out_block = ptr;
#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
	heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(mem_block),
				   ptr, count << mem_block->blk_sz_shift);
#endif

out:
	return ret;
}

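/*
 * Allocate @a count blocks, not necessarily contiguous, storing one
 * block address per element of @a out_blocks. If not all @a count
 * blocks can be allocated, everything allocated so far is freed and
 * -ENOMEM is returned.
 */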
int sys_mem_blocks_alloc(sys_mem_blocks_t *mem_block, size_t count,
			 void **out_blocks)
{
	int ret = 0;
	int i;

	__ASSERT_NO_MSG(mem_block != NULL);
	__ASSERT_NO_MSG(out_blocks != NULL);
	__ASSERT_NO_MSG(mem_block->bitmap != NULL);
	__ASSERT_NO_MSG(mem_block->buffer != NULL);

	if (count == 0) {
		/* Nothing to allocate */
		goto out;
	}

	if (count > mem_block->num_blocks) {
		/* Definitely not enough blocks to be allocated */
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < count; i++) {
		void *ptr = alloc_blocks(mem_block, 1);

		if (ptr == NULL) {
			break;
		}

		out_blocks[i] = ptr;

#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
		heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(mem_block),
					   ptr, BIT(mem_block->blk_sz_shift));
#endif
	}

	/* If error, free already allocated blocks. */
	if (i < count) {
		(void)sys_mem_blocks_free(mem_block, i, out_blocks);
		ret = -ENOMEM;
	}

out:
	return ret;
}

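/*
 * Check whether all @a count blocks starting at @a in_block are
 * currently unallocated.
 */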
int sys_mem_blocks_is_region_free(sys_mem_blocks_t *mem_block, void *in_block, size_t count)
{
	bool result;
	size_t offset;

	__ASSERT_NO_MSG(mem_block != NULL);
	__ASSERT_NO_MSG(mem_block->bitmap != NULL);
	__ASSERT_NO_MSG(mem_block->buffer != NULL);

	offset = ((uint8_t *)in_block - mem_block->buffer) >> mem_block->blk_sz_shift;

	__ASSERT_NO_MSG(offset + count <= mem_block->num_blocks);

	result = sys_bitarray_is_region_cleared(mem_block->bitmap, count, offset);
	return result;
}

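/*
 * Mark @a count specific blocks starting at @a in_block as allocated,
 * failing with -ENOMEM if any block in the region is already in use.
 */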
int sys_mem_blocks_get(sys_mem_blocks_t *mem_block, void *in_block, size_t count)
{
	int ret = 0;
	int offset;

	__ASSERT_NO_MSG(mem_block != NULL);
	__ASSERT_NO_MSG(mem_block->bitmap != NULL);
	__ASSERT_NO_MSG(mem_block->buffer != NULL);

	if (count == 0) {
		/* Nothing to allocate */
		goto out;
	}

	offset = ((uint8_t *)in_block - mem_block->buffer) >> mem_block->blk_sz_shift;

	if (offset + count > mem_block->num_blocks) {
		/* Definitely not enough blocks to be allocated */
		ret = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	k_spinlock_key_t key = k_spin_lock(&mem_block->lock);
#endif

	ret = sys_bitarray_test_and_set_region(mem_block->bitmap, count, offset, true);

	if (ret != 0) {
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
		k_spin_unlock(&mem_block->lock, key);
#endif
		ret = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
	mem_block->used_blocks += (uint32_t)count;

	if (mem_block->max_used_blocks < mem_block->used_blocks) {
		mem_block->max_used_blocks = mem_block->used_blocks;
	}

	k_spin_unlock(&mem_block->lock, key);
#endif

#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
	heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(mem_block),
				   in_block, count << mem_block->blk_sz_shift);
#endif

out:
	return ret;
}

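/*
 * Free @a count blocks, one block address per element of @a in_blocks.
 * Freeing continues after individual failures; the last error seen is
 * returned.
 */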
int sys_mem_blocks_free(sys_mem_blocks_t *mem_block, size_t count,
			void **in_blocks)
{
	int ret = 0;
	int i;

	__ASSERT_NO_MSG(mem_block != NULL);
	__ASSERT_NO_MSG(in_blocks != NULL);
	__ASSERT_NO_MSG(mem_block->bitmap != NULL);
	__ASSERT_NO_MSG(mem_block->buffer != NULL);

	if (count == 0) {
		/* Nothing to be freed. */
		goto out;
	}

	if (count > mem_block->num_blocks) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < count; i++) {
		void *ptr = in_blocks[i];

		int r = free_blocks(mem_block, ptr, 1);

		if (r != 0) {
			ret = r;
		}
#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
		else {
			/*
			 * Since we do not keep track of failed free ops,
			 * we need to notify about each freed block
			 * one-by-one, instead of once at the end of
			 * the function.
			 */
			heap_listener_notify_free(HEAP_ID_FROM_POINTER(mem_block),
						  ptr, BIT(mem_block->blk_sz_shift));
		}
#endif
	}

out:
	return ret;
}

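/* Free @a count contiguous blocks starting at @a block. */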
int sys_mem_blocks_free_contiguous(sys_mem_blocks_t *mem_block, void *block, size_t count)
{
	int ret = 0;

	__ASSERT_NO_MSG(mem_block != NULL);
	__ASSERT_NO_MSG(mem_block->bitmap != NULL);
	__ASSERT_NO_MSG(mem_block->buffer != NULL);

	if (count == 0) {
		/* Nothing to be freed. */
		goto out;
	}

	if (count > mem_block->num_blocks) {
		ret = -EINVAL;
		goto out;
	}

	ret = free_blocks(mem_block, block, count);

	if (ret != 0) {
		goto out;
	}
#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
	heap_listener_notify_free(HEAP_ID_FROM_POINTER(mem_block),
				  block, count << mem_block->blk_sz_shift);
#endif

out:
	return ret;
}

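/* Initialize an empty allocator group with the given choice function. */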
void sys_multi_mem_blocks_init(sys_multi_mem_blocks_t *group,
			       sys_multi_mem_blocks_choice_fn_t choice_fn)
{
	group->num_allocators = 0;
	group->choice_fn = choice_fn;
}

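/* Append an allocator to a group; asserts if the group is already full. */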
void sys_multi_mem_blocks_add_allocator(sys_multi_mem_blocks_t *group,
					sys_mem_blocks_t *alloc)
{
	__ASSERT_NO_MSG(group->num_allocators < ARRAY_SIZE(group->allocators));

	group->allocators[group->num_allocators++] = alloc;
}

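/*
 * Allocate @a count blocks from the allocator selected by the group's
 * choice function. On success, the chosen allocator's block size is
 * optionally reported through @a blk_size.
 */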
int sys_multi_mem_blocks_alloc(sys_multi_mem_blocks_t *group,
			       void *cfg, size_t count,
			       void **out_blocks,
			       size_t *blk_size)
{
	sys_mem_blocks_t *allocator;
	int ret = 0;

	__ASSERT_NO_MSG(group != NULL);
	__ASSERT_NO_MSG(out_blocks != NULL);

	if (count == 0) {
		if (blk_size != NULL) {
			*blk_size = 0;
		}
		goto out;
	}

	allocator = group->choice_fn(group, cfg);
	if (allocator == NULL) {
		ret = -EINVAL;
		goto out;
	}

	if (count > allocator->num_blocks) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sys_mem_blocks_alloc(allocator, count, out_blocks);

	if ((ret == 0) && (blk_size != NULL)) {
		*blk_size = BIT(allocator->blk_sz_shift);
	}

out:
	return ret;
}

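/*
 * Free blocks previously allocated from a group. The owning allocator
 * is found by checking which allocator's backing buffer contains the
 * first block.
 */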
int sys_multi_mem_blocks_free(sys_multi_mem_blocks_t *group,
			      size_t count, void **in_blocks)
{
	int i;
	int ret = 0;
	sys_mem_blocks_t *allocator = NULL;

	__ASSERT_NO_MSG(group != NULL);
	__ASSERT_NO_MSG(in_blocks != NULL);

	if (count == 0) {
		goto out;
	}

	for (i = 0; i < group->num_allocators; i++) {
		/*
		 * Find out which allocator the allocated blocks
		 * belong to.
		 */

		uint8_t *start, *end;
		sys_mem_blocks_t *one_alloc;

		one_alloc = group->allocators[i];
		start = one_alloc->buffer;
		end = start + (BIT(one_alloc->blk_sz_shift) * one_alloc->num_blocks);

		if (((uint8_t *)in_blocks[0] >= start) &&
		    ((uint8_t *)in_blocks[0] < end)) {
			allocator = one_alloc;
			break;
		}
	}

	if (allocator != NULL) {
		ret = sys_mem_blocks_free(allocator, count, in_blocks);
	} else {
		ret = -EINVAL;
	}

out:
	return ret;
}

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
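/* Report allocated, free, and peak allocation byte counts. */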
int sys_mem_blocks_runtime_stats_get(sys_mem_blocks_t *mem_block,
				     struct sys_memory_stats *stats)
{
	if ((mem_block == NULL) || (stats == NULL)) {
		return -EINVAL;
	}

	stats->allocated_bytes = mem_block->used_blocks <<
				 mem_block->blk_sz_shift;
	stats->free_bytes = (mem_block->num_blocks << mem_block->blk_sz_shift) -
			    stats->allocated_bytes;
	stats->max_allocated_bytes = mem_block->max_used_blocks <<
				     mem_block->blk_sz_shift;

	return 0;
}

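/* Reset the peak usage watermark to the current usage. */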
int sys_mem_blocks_runtime_stats_reset_max(sys_mem_blocks_t *mem_block)
{
	if (mem_block == NULL) {
		return -EINVAL;
	}

	mem_block->max_used_blocks = mem_block->used_blocks;

	return 0;
}
#endif