/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/heap_listener.h>
#include <zephyr/sys/mem_blocks.h>
#include <zephyr/sys/util.h>
#include <zephyr/init.h>
#include <string.h>

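/*
 * Usage sketch (illustrative only; SYS_MEM_BLOCKS_DEFINE comes from
 * zephyr/sys/mem_blocks.h, and the names below are examples):
 *
 *     SYS_MEM_BLOCKS_DEFINE(pool, 64, 4, 4);    // 4 blocks of 64 bytes
 *
 *     void *block;
 *
 *     if (sys_mem_blocks_alloc_contiguous(&pool, 2, &block) == 0) {
 *             // ... use 128 contiguous bytes at block ...
 *             sys_mem_blocks_free_contiguous(&pool, block, 2);
 *     }
 */

/*
 * Allocate a contiguous run of num_blocks blocks from mem_block.
 * Scans the allocator bitmap for a free region, marks it as used,
 * updates the runtime statistics when enabled, and returns the address
 * of the first block, or NULL when no large enough region is free.
 */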
static void *alloc_blocks(sys_mem_blocks_t *mem_block, size_t num_blocks)
{
        size_t offset;
        int r;
        uint8_t *blk;

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
        k_spinlock_key_t key = k_spin_lock(&mem_block->lock);
#endif

        /* Find an unallocated run of blocks */
        r = sys_bitarray_alloc(mem_block->bitmap, num_blocks, &offset);
        if (r != 0) {
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
                k_spin_unlock(&mem_block->lock, key);
#endif
                return NULL;
        }

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
        mem_block->info.used_blocks += (uint32_t)num_blocks;

        if (mem_block->info.max_used_blocks < mem_block->info.used_blocks) {
                mem_block->info.max_used_blocks = mem_block->info.used_blocks;
        }

        k_spin_unlock(&mem_block->lock, key);
#endif

        /* Calculate the start address of the newly allocated blocks */
        blk = mem_block->buffer + (offset << mem_block->info.blk_sz_shift);

        return blk;
}

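/*
 * Return num_blocks blocks starting at ptr to mem_block. Returns 0 on
 * success, -EFAULT if ptr falls outside the backing buffer, or a
 * negative error code propagated from sys_bitarray_free().
 */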
static int free_blocks(sys_mem_blocks_t *mem_block, void *ptr,
                       size_t num_blocks)
{
        size_t offset;
        uint8_t *blk = ptr;
        int ret = 0;

        /* Make sure the incoming block is within the mem_block buffer */
        if (blk < mem_block->buffer) {
                ret = -EFAULT;
                goto out;
        }

        offset = (blk - mem_block->buffer) >> mem_block->info.blk_sz_shift;
        if (offset >= mem_block->info.num_blocks) {
                ret = -EFAULT;
                goto out;
        }

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
        k_spinlock_key_t key = k_spin_lock(&mem_block->lock);
#endif
        ret = sys_bitarray_free(mem_block->bitmap, num_blocks, offset);

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
        if (ret == 0) {
                mem_block->info.used_blocks -= (uint32_t)num_blocks;
        }

        k_spin_unlock(&mem_block->lock, key);
#endif

out:
        return ret;
}

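/*
 * Allocate count contiguous blocks and store the address of the first
 * one in out_block. Returns 0 on success, or -ENOMEM when no contiguous
 * region of that size is available.
 */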
int sys_mem_blocks_alloc_contiguous(sys_mem_blocks_t *mem_block, size_t count,
                                    void **out_block)
{
        int ret = 0;

        __ASSERT_NO_MSG(mem_block != NULL);
        __ASSERT_NO_MSG(out_block != NULL);

        if (count == 0) {
                /* Nothing to allocate */
                *out_block = NULL;
                goto out;
        }

        if (count > mem_block->info.num_blocks) {
                /* Definitely not enough blocks to be allocated */
                ret = -ENOMEM;
                goto out;
        }

        void *ptr = alloc_blocks(mem_block, count);

        if (ptr == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        *out_block = ptr;
#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
        heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(mem_block),
                                   ptr, count << mem_block->info.blk_sz_shift);
#endif

out:
        return ret;
}

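/*
 * Allocate count blocks one at a time, storing their addresses in
 * out_blocks. The blocks need not be contiguous. If any allocation
 * fails, the blocks already allocated are freed and -ENOMEM returned.
 */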
int sys_mem_blocks_alloc(sys_mem_blocks_t *mem_block, size_t count,
                         void **out_blocks)
{
        int ret = 0;
        int i;

        __ASSERT_NO_MSG(mem_block != NULL);
        __ASSERT_NO_MSG(out_blocks != NULL);
        __ASSERT_NO_MSG(mem_block->bitmap != NULL);
        __ASSERT_NO_MSG(mem_block->buffer != NULL);

        if (count == 0) {
                /* Nothing to allocate */
                goto out;
        }

        if (count > mem_block->info.num_blocks) {
                /* Definitely not enough blocks to be allocated */
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < count; i++) {
                void *ptr = alloc_blocks(mem_block, 1);

                if (ptr == NULL) {
                        break;
                }

                out_blocks[i] = ptr;

#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
                heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(mem_block),
                                           ptr,
                                           BIT(mem_block->info.blk_sz_shift));
#endif
        }

        /* If an allocation failed, free the blocks allocated so far. */
        if (i < count) {
                (void)sys_mem_blocks_free(mem_block, i, out_blocks);
                ret = -ENOMEM;
        }

out:
        return ret;
}

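/*
 * Check whether the count blocks starting at in_block are all
 * unallocated. Returns true (non-zero) if the whole region is free.
 */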
int sys_mem_blocks_is_region_free(sys_mem_blocks_t *mem_block, void *in_block,
                                  size_t count)
{
        bool result;
        size_t offset;

        __ASSERT_NO_MSG(mem_block != NULL);
        __ASSERT_NO_MSG(mem_block->bitmap != NULL);
        __ASSERT_NO_MSG(mem_block->buffer != NULL);

        offset = ((uint8_t *)in_block - mem_block->buffer) >>
                 mem_block->info.blk_sz_shift;

        __ASSERT_NO_MSG(offset + count <= mem_block->info.num_blocks);

        result = sys_bitarray_is_region_cleared(mem_block->bitmap, count,
                                                offset);
        return result;
}

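/*
 * Claim a specific region of count blocks starting at in_block. Fails
 * with -ENOMEM if the region extends past the buffer or if any block
 * within it is already allocated.
 */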
int sys_mem_blocks_get(sys_mem_blocks_t *mem_block, void *in_block, size_t count)
{
        int ret = 0;
        int offset;

        __ASSERT_NO_MSG(mem_block != NULL);
        __ASSERT_NO_MSG(mem_block->bitmap != NULL);
        __ASSERT_NO_MSG(mem_block->buffer != NULL);

        if (count == 0) {
                /* Nothing to allocate */
                goto out;
        }

        offset = ((uint8_t *)in_block - mem_block->buffer) >>
                 mem_block->info.blk_sz_shift;

        if (offset + count > mem_block->info.num_blocks) {
                /* Definitely not enough blocks to be allocated */
                ret = -ENOMEM;
                goto out;
        }

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
        k_spinlock_key_t key = k_spin_lock(&mem_block->lock);
#endif

        ret = sys_bitarray_test_and_set_region(mem_block->bitmap, count,
                                               offset, true);

        if (ret != 0) {
#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
                k_spin_unlock(&mem_block->lock, key);
#endif
                ret = -ENOMEM;
                goto out;
        }

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
        mem_block->info.used_blocks += (uint32_t)count;

        if (mem_block->info.max_used_blocks < mem_block->info.used_blocks) {
                mem_block->info.max_used_blocks = mem_block->info.used_blocks;
        }

        k_spin_unlock(&mem_block->lock, key);
#endif

#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
        heap_listener_notify_alloc(HEAP_ID_FROM_POINTER(mem_block),
                                   in_block, count << mem_block->info.blk_sz_shift);
#endif

out:
        return ret;
}

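/*
 * Free count blocks whose addresses are listed in in_blocks. Freeing
 * continues past individual failures; the last error, if any, is
 * returned.
 */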
int sys_mem_blocks_free(sys_mem_blocks_t *mem_block, size_t count,
                        void **in_blocks)
{
        int ret = 0;
        int i;

        __ASSERT_NO_MSG(mem_block != NULL);
        __ASSERT_NO_MSG(in_blocks != NULL);
        __ASSERT_NO_MSG(mem_block->bitmap != NULL);
        __ASSERT_NO_MSG(mem_block->buffer != NULL);

        if (count == 0) {
                /* Nothing to be freed. */
                goto out;
        }

        if (count > mem_block->info.num_blocks) {
                ret = -EINVAL;
                goto out;
        }

        for (i = 0; i < count; i++) {
                void *ptr = in_blocks[i];

                int r = free_blocks(mem_block, ptr, 1);

                if (r != 0) {
                        ret = r;
                }
#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
                else {
                        /*
                         * Since failed free operations are not tracked,
                         * each successful free must be reported one by
                         * one rather than once at the end of the function.
                         */
                        heap_listener_notify_free(HEAP_ID_FROM_POINTER(mem_block),
                                                  ptr, BIT(mem_block->info.blk_sz_shift));
                }
#endif
        }

out:
        return ret;
}

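/*
 * Free count contiguous blocks starting at block in a single bitmap
 * operation.
 */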
int sys_mem_blocks_free_contiguous(sys_mem_blocks_t *mem_block, void *block, size_t count)
{
        int ret = 0;

        __ASSERT_NO_MSG(mem_block != NULL);
        __ASSERT_NO_MSG(mem_block->bitmap != NULL);
        __ASSERT_NO_MSG(mem_block->buffer != NULL);

        if (count == 0) {
                /* Nothing to be freed. */
                goto out;
        }

        if (count > mem_block->info.num_blocks) {
                ret = -EINVAL;
                goto out;
        }

        ret = free_blocks(mem_block, block, count);

        if (ret != 0) {
                goto out;
        }
#ifdef CONFIG_SYS_MEM_BLOCKS_LISTENER
        heap_listener_notify_free(HEAP_ID_FROM_POINTER(mem_block),
                                  block, count << mem_block->info.blk_sz_shift);
#endif

out:
        return ret;
}

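/*
 * Initialize a multi-allocator group with no allocators and the given
 * choice function, which selects an allocator for each request.
 */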
void sys_multi_mem_blocks_init(sys_multi_mem_blocks_t *group,
                               sys_multi_mem_blocks_choice_fn_t choice_fn)
{
        group->num_allocators = 0;
        group->choice_fn = choice_fn;
}

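/* Register alloc as one of the group's allocators. */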
void sys_multi_mem_blocks_add_allocator(sys_multi_mem_blocks_t *group,
                                        sys_mem_blocks_t *alloc)
{
        __ASSERT_NO_MSG(group->num_allocators < ARRAY_SIZE(group->allocators));

        group->allocators[group->num_allocators++] = alloc;
}

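/*
 * Allocate count blocks from the allocator selected by the group's
 * choice function for cfg. On success, optionally report the chosen
 * allocator's block size through blk_size.
 */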
int sys_multi_mem_blocks_alloc(sys_multi_mem_blocks_t *group,
                               void *cfg, size_t count,
                               void **out_blocks,
                               size_t *blk_size)
{
        sys_mem_blocks_t *allocator;
        int ret = 0;

        __ASSERT_NO_MSG(group != NULL);
        __ASSERT_NO_MSG(out_blocks != NULL);

        if (count == 0) {
                if (blk_size != NULL) {
                        *blk_size = 0;
                }
                goto out;
        }

        allocator = group->choice_fn(group, cfg);
        if (allocator == NULL) {
                ret = -EINVAL;
                goto out;
        }

        if (count > allocator->info.num_blocks) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sys_mem_blocks_alloc(allocator, count, out_blocks);

        if ((ret == 0) && (blk_size != NULL)) {
                *blk_size = BIT(allocator->info.blk_sz_shift);
        }

out:
        return ret;
}

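/*
 * Free blocks previously allocated from a group. The owning allocator
 * is found by locating the first block's address within one of the
 * allocators' buffers; all count blocks must belong to that allocator.
 */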
int sys_multi_mem_blocks_free(sys_multi_mem_blocks_t *group,
                              size_t count, void **in_blocks)
{
        int i;
        int ret = 0;
        sys_mem_blocks_t *allocator = NULL;

        __ASSERT_NO_MSG(group != NULL);
        __ASSERT_NO_MSG(in_blocks != NULL);

        if (count == 0) {
                goto out;
        }

        for (i = 0; i < group->num_allocators; i++) {
                /*
                 * Find out which allocator the allocated blocks
                 * belong to.
                 */

                uint8_t *start, *end;
                sys_mem_blocks_t *one_alloc;

                one_alloc = group->allocators[i];
                start = one_alloc->buffer;
                end = start + (BIT(one_alloc->info.blk_sz_shift) *
                               one_alloc->info.num_blocks);

                if (((uint8_t *)in_blocks[0] >= start) &&
                    ((uint8_t *)in_blocks[0] < end)) {
                        allocator = one_alloc;
                        break;
                }
        }

        if (allocator != NULL) {
                ret = sys_mem_blocks_free(allocator, count, in_blocks);
        } else {
                ret = -EINVAL;
        }

out:
        return ret;
}

#ifdef CONFIG_SYS_MEM_BLOCKS_RUNTIME_STATS
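/*
 * Report byte counts derived from the block statistics: bytes currently
 * allocated, bytes free, and the high-water mark of allocated bytes.
 */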
int sys_mem_blocks_runtime_stats_get(sys_mem_blocks_t *mem_block,
                                     struct sys_memory_stats *stats)
{
        if ((mem_block == NULL) || (stats == NULL)) {
                return -EINVAL;
        }

        stats->allocated_bytes = mem_block->info.used_blocks <<
                                 mem_block->info.blk_sz_shift;
        stats->free_bytes = (mem_block->info.num_blocks <<
                             mem_block->info.blk_sz_shift) -
                            stats->allocated_bytes;
        stats->max_allocated_bytes = mem_block->info.max_used_blocks <<
                                     mem_block->info.blk_sz_shift;

        return 0;
}

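/* Reset the maximum-usage watermark to the current usage. */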
int sys_mem_blocks_runtime_stats_reset_max(sys_mem_blocks_t *mem_block)
{
        if (mem_block == NULL) {
                return -EINVAL;
        }

        mem_block->info.max_used_blocks = mem_block->info.used_blocks;

        return 0;
}
#endif

#ifdef CONFIG_OBJ_CORE_STATS_SYS_MEM_BLOCKS
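/* Copy the raw block statistics under the allocator's spinlock. */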
static int sys_mem_blocks_stats_raw(struct k_obj_core *obj_core, void *stats)
{
        struct sys_mem_blocks *block;
        k_spinlock_key_t key;

        block = CONTAINER_OF(obj_core, struct sys_mem_blocks, obj_core);

        key = k_spin_lock(&block->lock);

        memcpy(stats, &block->info, sizeof(block->info));

        k_spin_unlock(&block->lock, key);

        return 0;
}

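/* Convert the block statistics to byte counts for a stats query. */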
static int sys_mem_blocks_stats_query(struct k_obj_core *obj_core, void *stats)
{
        struct sys_mem_blocks *block;
        k_spinlock_key_t key;
        struct sys_memory_stats *ptr = stats;

        block = CONTAINER_OF(obj_core, struct sys_mem_blocks, obj_core);

        key = k_spin_lock(&block->lock);

        ptr->free_bytes = (block->info.num_blocks - block->info.used_blocks) <<
                          block->info.blk_sz_shift;
        ptr->allocated_bytes = block->info.used_blocks <<
                               block->info.blk_sz_shift;
        ptr->max_allocated_bytes = block->info.max_used_blocks <<
                                   block->info.blk_sz_shift;

        k_spin_unlock(&block->lock, key);

        return 0;
}

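/* Reset the high-water mark to the current block usage. */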
static int sys_mem_blocks_stats_reset(struct k_obj_core *obj_core)
{
        struct sys_mem_blocks *block;
        k_spinlock_key_t key;

        block = CONTAINER_OF(obj_core, struct sys_mem_blocks, obj_core);

        key = k_spin_lock(&block->lock);
        block->info.max_used_blocks = block->info.used_blocks;
        k_spin_unlock(&block->lock, key);

        return 0;
}

static struct k_obj_core_stats_desc sys_mem_blocks_stats_desc = {
        .raw_size = sizeof(struct sys_mem_blocks_info),
        .query_size = sizeof(struct sys_memory_stats),
        .raw = sys_mem_blocks_stats_raw,
        .query = sys_mem_blocks_stats_query,
        .reset = sys_mem_blocks_stats_reset,
        .disable = NULL,
        .enable = NULL,
};
#endif

#ifdef CONFIG_OBJ_CORE_SYS_MEM_BLOCKS
static struct k_obj_type obj_type_sys_mem_blocks;

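/*
 * SYS_INIT hook: register the sys_mem_blocks object type with the
 * object core and link every statically defined allocator into it.
 */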
static int init_sys_mem_blocks_obj_core_list(void)
{
        /* Initialize the sys_mem_blocks object type */
        z_obj_type_init(&obj_type_sys_mem_blocks, K_OBJ_TYPE_MEM_BLOCK_ID,
                        offsetof(struct sys_mem_blocks, obj_core));

#ifdef CONFIG_OBJ_CORE_STATS_SYS_MEM_BLOCKS
        k_obj_type_stats_init(&obj_type_sys_mem_blocks,
                              &sys_mem_blocks_stats_desc);
#endif

        /* Initialize statically defined sys_mem_blocks */
        STRUCT_SECTION_FOREACH_ALTERNATE(sys_mem_blocks_ptr,
                                         sys_mem_blocks *, block_pp) {
                k_obj_core_init_and_link(K_OBJ_CORE(*block_pp),
                                         &obj_type_sys_mem_blocks);
#ifdef CONFIG_OBJ_CORE_STATS_SYS_MEM_BLOCKS
                k_obj_core_stats_register(K_OBJ_CORE(*block_pp),
                                          &(*block_pp)->info,
                                          sizeof(struct sys_mem_blocks_info));
#endif
        }

        return 0;
}

SYS_INIT(init_sys_mem_blocks_obj_core_list, PRE_KERNEL_1,
         CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif