1 /*! *********************************************************************************
2  * Copyright (c) 2015, Freescale Semiconductor, Inc.
3  * Copyright 2016-2022, 2023 NXP
4  * All rights reserved.
5  *
6  * \file
7  *
8  * This is the source file for the Memory Manager.
9  *
10  * SPDX-License-Identifier: BSD-3-Clause
11  ********************************************************************************** */
12 
13 /*! *********************************************************************************
14 *************************************************************************************
15 * Include
16 *************************************************************************************
17 ********************************************************************************** */
18 
19 #include "fsl_common.h"
20 #if defined(MEM_STATISTICS_INTERNAL) || defined(MEM_MANAGER_BENCH)
21 #include "fsl_component_timer_manager.h"
22 #include "fsl_component_mem_manager_internal.h"
23 #endif /* MEM_STATISTICS_INTERNAL MEM_MANAGER_BENCH*/
24 #include "fsl_component_mem_manager.h"
25 #if defined(gDebugConsoleEnable_d) && (gDebugConsoleEnable_d == 1)
26 #include "fsl_debug_console.h"
27 #endif
28 
29 #if defined(gMemManagerLight) && (gMemManagerLight == 1)
30 
31 /*  Selects the allocation scheme that will be used by the MemManagerLight
32     0: Allocates the first free block available in the heap, no matter its size
33     1: Allocates the first free block whose size is at most the double of the requested size
34     2: Allocates the first free block whose size is at most the 4/3 of the requested size     */
35 #ifndef cMemManagerLightReuseFreeBlocks
36 #define cMemManagerLightReuseFreeBlocks 1
37 #endif
38 
39 #if defined(cMemManagerLightReuseFreeBlocks) && (cMemManagerLightReuseFreeBlocks > 0)
/* Because a more restrictive policy on the size of the free blocks applies when
 *  cMemManagerLightReuseFreeBlocks is set, we need to enable a garbage collector to
 *  clean up the free blocks when possible.
 * When set, gMemManagerLightFreeBlocksCleanUp is used to select between 2 policies:
 *  1: on each buffer free, the allocator parses the free list in the forward direction and
 *     attempts to merge the freed buffer with the top unused remainder of the region.
 *  2: In addition to the behaviour described in 1, the allocator parses the list backwards to merge
 *     previous contiguous members of the free list (free blocks) if adjacent to the last block.
 *     In this case they meld into the top of the unused region.
 */
49 #ifndef gMemManagerLightFreeBlocksCleanUp
50 #define gMemManagerLightFreeBlocksCleanUp 2
51 #endif
52 #endif
53 
54 #ifndef gMemManagerLightGuardsCheckEnable
55 #define gMemManagerLightGuardsCheckEnable 0
56 #endif
57 
58 /*! Extend Heap usage beyond the size defined by MinimalHeapSize_c up to __HEAP_end__ symbol address
59  *   to make full use of the remaining available SRAM for the dynamic allocator. Also, only the data up to the
60  *   highest allocated block will be retained by calling the @function MEM_GetHeapUpperLimit() from the power
61  *   manager.
62  *   When this flag is turned to 1 :
63  *     -  __HEAP_end__ linker symbol shall be defined in linker script to be the highest allowed address
64  *   used by the fsl_component_memory_manager_light
65  *     - .heap section shall be defined and placed after bss and zi sections to make sure no data is located
66  *   up to __HEAP_end__ symbol.
 *   @Warning, no data shall be placed after the memHeap symbol address up to __HEAP_end__. If another
 *   memory allocator uses a memory area between __HEAP_start__ and __HEAP_end__, the areas may overlap
 *   with fsl_component_memory_manager_light, so this flag shall be kept to 0
70  */
71 #ifndef gMemManagerLightExtendHeapAreaUsage
72 #define gMemManagerLightExtendHeapAreaUsage 0
73 #endif
74 
75 /*! *********************************************************************************
76 *************************************************************************************
77 * Private macros
78 *************************************************************************************
79 ********************************************************************************** */
80 #ifndef MAX_UINT16
81 #define MAX_UINT16 0x00010000U
82 #endif
83 
84 #define MEMMANAGER_BLOCK_INVALID (uint16_t)0x0    /* Used to remove a block in the heap - debug only */
85 #define MEMMANAGER_BLOCK_FREE    (uint16_t)0xBA00 /* Mark a previous allocated block as free         */
86 #define MEMMANAGER_BLOCK_USED    (uint16_t)0xBABE /* Mark the block as allocated                     */
87 
88 #define BLOCK_HDR_SIZE (ROUNDUP_WORD(sizeof(blockHeader_t)))
89 
90 #define ROUNDUP_WORD(__x) (((((__x)-1U) & ~0x3U) + 4U) & 0XFFFFFFFFU)
91 
92 #define BLOCK_HDR_PREGUARD_SIZE     28U
93 #define BLOCK_HDR_PREGUARD_PATTERN  0x28U
94 #define BLOCK_HDR_POSTGUARD_SIZE    28U
95 #define BLOCK_HDR_POSTGUARD_PATTERN 0x39U
96 
97 #if defined(__IAR_SYSTEMS_ICC__)
98 #define __mem_get_LR() __get_LR()
99 #elif defined(__GNUC__)
100 #define __mem_get_LR() __builtin_return_address(0U)
101 #elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
102 #define __mem_get_LR() __return_address()
103 #endif
104 
105 #if defined(gMemManagerLightGuardsCheckEnable) && (gMemManagerLightGuardsCheckEnable == 1)
106 #define gMemManagerLightAddPreGuard  1
107 #define gMemManagerLightAddPostGuard 1
108 #endif
109 
110 #ifndef gMemManagerLightAddPreGuard
111 #define gMemManagerLightAddPreGuard 0
112 #endif
113 
114 #ifndef gMemManagerLightAddPostGuard
115 #define gMemManagerLightAddPostGuard 0
116 #endif
117 
118 #if defined(__IAR_SYSTEMS_ICC__) && (defined __CORTEX_M) && \
119     ((__CORTEX_M == 4U) || (__CORTEX_M == 7U) || (__CORTEX_M == 33U))
120 #define D_BARRIER __asm("DSB"); /* __DSB() could not be used */
121 #else
122 #define D_BARRIER
123 #endif
124 #define ENABLE_GLOBAL_IRQ(reg) \
125     D_BARRIER;                 \
126     EnableGlobalIRQ(reg)
127 #define KB(x) ((x) << 10u)
128 
129 /************************************************************************************
130 *************************************************************************************
131 * Private type definitions
132 *************************************************************************************
133 ************************************************************************************/
134 
135 typedef struct blockHeader_s
136 {
137 #if defined(gMemManagerLightAddPreGuard) && (gMemManagerLightAddPreGuard == 1)
138     uint8_t preguard[BLOCK_HDR_PREGUARD_SIZE];
139 #endif
140     uint16_t used;
141     uint8_t area_id;
142     uint8_t reserved;
143 #if defined(MEM_STATISTICS_INTERNAL)
144     uint16_t buff_size;
145 #endif
146     struct blockHeader_s *next;
147     struct blockHeader_s *next_free;
148     struct blockHeader_s *prev_free;
149 #ifdef MEM_TRACKING
150     void *first_alloc_caller;
151     void *second_alloc_caller;
152 #endif
153 #if defined(gMemManagerLightAddPostGuard) && (gMemManagerLightAddPostGuard == 1)
154     uint8_t postguard[BLOCK_HDR_POSTGUARD_SIZE];
155 #endif
156 } blockHeader_t;
157 
158 typedef struct freeBlockHeaderList_s
159 {
160     struct blockHeader_s *head;
161     struct blockHeader_s *tail;
162 } freeBlockHeaderList_t;
163 
164 typedef union void_ptr_tag
165 {
166     uint32_t raw_address;
167     uint32_t *address_ptr;
168     void *void_ptr;
169     blockHeader_t *block_hdr_ptr;
170 } void_ptr_t;
171 typedef struct _memAreaPriv_s
172 {
173     freeBlockHeaderList_t FreeBlockHdrList;
174 #ifdef MEM_STATISTICS_INTERNAL
175     mem_statis_t statistics;
176 #endif
177 } memAreaPriv_t;
178 
179 typedef struct _mem_area_priv_desc_s
180 {
181     memAreaCfg_t *next;       /*< Next registered RAM area descriptor. */
182     void_ptr_t start_address; /*< Start address of RAM area. */
183     void_ptr_t end_address;   /*< End address of registered RAM area. */
184     uint16_t flags;           /*< BIT(0) means not member of default pool, other bits RFFU */
185     uint16_t reserved;        /*< alignment padding */
186     uint32_t low_watermark;
187     union
188     {
189         uint8_t internal_ctx[MML_INTERNAL_STRUCT_SZ]; /* Placeholder for internal allocator data */
190         memAreaPriv_t ctx;
191     };
192 } memAreaPrivDesc_t;
193 
194 /*! *********************************************************************************
195 *************************************************************************************
196 * Private memory declarations
197 *************************************************************************************
198 ********************************************************************************** */
199 
200 #ifndef MEMORY_POOL_GLOBAL_VARIABLE_ALLOC
201 /* Allocate memHeap array in the .heap section to ensure the size of the .heap section is large enough
202    for the application
203    However, the real heap used at run time will cover all the .heap section so this area can be bigger
204    than the requested MinimalHeapSize_c - see memHeapEnd */
205 #if defined(__IAR_SYSTEMS_ICC__)
206 #pragma location = ".heap"
207 static uint32_t memHeap[MinimalHeapSize_c / sizeof(uint32_t)];
208 #elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
209 static uint32_t memHeap[MinimalHeapSize_c / sizeof(uint32_t)] __attribute__((section(".heap")));
210 #elif defined(__GNUC__)
211 static uint32_t memHeap[MinimalHeapSize_c / sizeof(uint32_t)] __attribute__((section(".heap, \"aw\", %nobits @")));
212 #else
213 #error "Compiler unknown!"
214 #endif
215 
216 #if defined(gMemManagerLightExtendHeapAreaUsage) && (gMemManagerLightExtendHeapAreaUsage == 1)
217 #if defined(__ARMCC_VERSION)
218 extern uint32_t Image$$ARM_LIB_STACK$$Base[];
219 static const uint32_t memHeapEnd = (uint32_t)&Image$$ARM_LIB_STACK$$Base;
220 #else
221 extern uint32_t __HEAP_end__[];
222 static const uint32_t memHeapEnd = (uint32_t)&__HEAP_end__;
223 #endif
224 #else
225 static const uint32_t memHeapEnd = (uint32_t)(memHeap + MinimalHeapSize_c / sizeof(uint32_t));
226 #endif
227 
228 #else
229 extern uint32_t *memHeap;
230 extern uint32_t memHeapEnd;
231 #endif /* MEMORY_POOL_GLOBAL_VARIABLE_ALLOC */
232 
233 static memAreaPrivDesc_t heap_area_list;
234 
235 #ifdef MEM_STATISTICS_INTERNAL
236 static mem_statis_t s_memStatis;
237 #endif /* MEM_STATISTICS_INTERNAL */
238 
239 #if defined(gFSCI_MemAllocTest_Enabled_d) && (gFSCI_MemAllocTest_Enabled_d)
240 extern mem_alloc_test_status_t FSCI_MemAllocTestCanAllocate(void *pCaller);
241 #endif
242 
243 /*! *********************************************************************************
244 *************************************************************************************
245 * Private functions
246 *************************************************************************************
247 ********************************************************************************** */
248 
249 #ifdef MEM_STATISTICS_INTERNAL
/* Reset every statistics counter of the given structure and refresh the core
 * clock value used to derive benchmark timestamps. */
static void MEM_Inits_memStatis(mem_statis_t *s_memStatis_)
{
    /* Clear all counters/peaks before the area starts serving allocations */
    (void)memset(s_memStatis_, 0, sizeof(*s_memStatis_));
    SystemCoreClockUpdate();
}
255 
/*
 * Record allocation statistics for a buffer just handed out by the allocator.
 *
 * buffer        : payload pointer returned to the caller (sits right after the block header)
 * time          : allocation duration (benchmark builds only; 0 means "invalid sample, skip it")
 * requestedSize : number of bytes the caller asked for
 */
static void MEM_BufferAllocates_memStatis(void *buffer, uint32_t time, uint32_t requestedSize)
{
    void_ptr_t buffer_ptr;
    void_ptr_t blockHdr_ptr;
    blockHeader_t *BlockHdr;

    /* Using union to fix Misra */
    buffer_ptr.void_ptr      = buffer;
    /* The block header is located immediately before the returned payload */
    blockHdr_ptr.raw_address = buffer_ptr.raw_address - BLOCK_HDR_SIZE;
    BlockHdr                 = blockHdr_ptr.block_hdr_ptr;

    /* existing block must have a BlockHdr and a next BlockHdr */
    assert((BlockHdr != NULL) && (BlockHdr->next != NULL));

    s_memStatis.nb_alloc++;
    /* Sort the buffers by size, based on defined thresholds */
    if (requestedSize <= SMALL_BUFFER_SIZE)
    {
        s_memStatis.nb_small_buffer++;
        UPDATE_PEAK(s_memStatis.nb_small_buffer, s_memStatis.peak_small_buffer);
    }
    else if (requestedSize <= LARGE_BUFFER_SIZE)
    {
        s_memStatis.nb_medium_buffer++;
        UPDATE_PEAK(s_memStatis.nb_medium_buffer, s_memStatis.peak_medium_buffer);
    }
    else
    {
        s_memStatis.nb_large_buffer++;
        UPDATE_PEAK(s_memStatis.nb_large_buffer, s_memStatis.peak_large_buffer);
    }
    /* the RAM allocated is the buffer size and the block header size*/
    s_memStatis.ram_allocated += (uint16_t)(requestedSize + BLOCK_HDR_SIZE);
    UPDATE_PEAK(s_memStatis.ram_allocated, s_memStatis.peak_ram_allocated);

    /* Actual block size is the distance to the next header, minus this header */
    uint32_t block_size = 0U;
    block_size          = (uint32_t)BlockHdr->next - (uint32_t)BlockHdr - BLOCK_HDR_SIZE;

    assert(block_size >= requestedSize);
    /* ram lost is the difference between block size and buffer size */
    s_memStatis.ram_lost += (uint16_t)(block_size - requestedSize);
    UPDATE_PEAK(s_memStatis.ram_lost, s_memStatis.peak_ram_lost);

    /* NOTE(review): FreeBlockHdrList is not declared at file scope in this multi-area
     * version (each area keeps its own list in p_area->ctx.FreeBlockHdrList) - confirm
     * this still compiles when MEM_STATISTICS_INTERNAL is enabled, and whether the
     * intended reference is the default area's tail. */
    UPDATE_PEAK(((uint32_t)FreeBlockHdrList.tail + BLOCK_HDR_SIZE), s_memStatis.peak_upper_addr);

#ifdef MEM_MANAGER_BENCH
    if (time != 0U)
    {
        /* update mem stats used for benchmarking */
        s_memStatis.last_alloc_block_size = (uint16_t)block_size;
        s_memStatis.last_alloc_buff_size  = (uint16_t)requestedSize;
        s_memStatis.last_alloc_time       = (uint16_t)time;
        s_memStatis.total_alloc_time += time;
        s_memStatis.average_alloc_time = (uint16_t)(s_memStatis.total_alloc_time / s_memStatis.nb_alloc);
        UPDATE_PEAK((uint16_t)time, s_memStatis.peak_alloc_time);
    }
    else /* alloc time is not correct, we bypass this allocation's data */
    {
        s_memStatis.nb_alloc--;
    }
#else
    (void)time;
#endif /* MEM_MANAGER_BENCH */
}
320 
MEM_BufferFrees_memStatis(void * buffer)321 static void MEM_BufferFrees_memStatis(void *buffer)
322 {
323     void_ptr_t buffer_ptr;
324     void_ptr_t blockHdr_ptr;
325     blockHeader_t *BlockHdr;
326 
327     /* Use union to fix Misra */
328     buffer_ptr.void_ptr      = buffer;
329     blockHdr_ptr.raw_address = buffer_ptr.raw_address - BLOCK_HDR_SIZE;
330     BlockHdr                 = blockHdr_ptr.block_hdr_ptr;
331 
332     /* Existing block must have a next block hdr */
333     assert((BlockHdr != NULL) && (BlockHdr->next != NULL));
334 
335     s_memStatis.ram_allocated -= (uint16_t)(BlockHdr->buff_size + BLOCK_HDR_SIZE);
336     /* Sort the buffers by size, based on defined thresholds */
337     if (BlockHdr->buff_size <= SMALL_BUFFER_SIZE)
338     {
339         s_memStatis.nb_small_buffer--;
340     }
341     else if (BlockHdr->buff_size <= LARGE_BUFFER_SIZE)
342     {
343         s_memStatis.nb_medium_buffer--;
344     }
345     else
346     {
347         s_memStatis.nb_large_buffer--;
348     }
349 
350     uint16_t block_size = 0U;
351     block_size          = (uint16_t)((uint32_t)BlockHdr->next - (uint32_t)BlockHdr - BLOCK_HDR_SIZE);
352 
353     assert(block_size >= BlockHdr->buff_size);
354     assert(s_memStatis.ram_lost >= (block_size - BlockHdr->buff_size));
355 
356     /* as the buffer is free, the ram is not "lost" anymore */
357     s_memStatis.ram_lost -= (block_size - BlockHdr->buff_size);
358 }
359 
360 #endif /* MEM_STATISTICS_INTERNAL */
361 
362 #if defined(gMemManagerLightFreeBlocksCleanUp) && (gMemManagerLightFreeBlocksCleanUp > 0)
/*
 * Garbage-collect free blocks adjacent to the unused top of the area.
 *
 * Walks forward from BlockHdr: if every block from BlockHdr to the end of the
 * area is free (each next block is also the next free block), the whole run is
 * melded into the unused top of the region by truncating the list at BlockHdr.
 * With policy 2, preceding contiguous free blocks are merged in first, so the
 * truncation point moves as far back as possible.
 *
 * p_area   : area descriptor owning the free list
 * BlockHdr : free block to start the clean-up from (must not be the list tail)
 */
static void MEM_BufferFreeBlocksCleanUp(memAreaPrivDesc_t *p_area, blockHeader_t *BlockHdr)
{
    blockHeader_t *NextBlockHdr     = BlockHdr->next;
    blockHeader_t *NextFreeBlockHdr = BlockHdr->next_free;
    /* This function shouldn't be called on the last free block */
    assert(BlockHdr < p_area->ctx.FreeBlockHdrList.tail);

    /* Step forward and append contiguous free blocks if they can be merged with the unused top of heap */
    while (NextBlockHdr == NextFreeBlockHdr)
    {
        if (NextBlockHdr == NULL)
        {
#if (gMemManagerLightFreeBlocksCleanUp == 2)
            /* Step backwards to merge all preceding contiguous free blocks */
            blockHeader_t *PrevFreeBlockHdr = BlockHdr->prev_free;
            while (PrevFreeBlockHdr->next == BlockHdr)
            {
                /* A physically adjacent predecessor must also be the list predecessor, and free */
                assert(PrevFreeBlockHdr->next_free == BlockHdr);
                assert(PrevFreeBlockHdr->used == MEMMANAGER_BLOCK_FREE);
                PrevFreeBlockHdr->next_free = BlockHdr->next_free;
                PrevFreeBlockHdr->next      = BlockHdr->next;
                BlockHdr                    = PrevFreeBlockHdr;
                PrevFreeBlockHdr            = BlockHdr->prev_free;
            }
#endif
            assert(BlockHdr->next == BlockHdr->next_free);
            assert(BlockHdr->used == MEMMANAGER_BLOCK_FREE);
            /* pool is reached.  All buffers from BlockHdr to the pool are free
               remove all next buffers */
            BlockHdr->next                    = NULL;
            BlockHdr->next_free               = NULL;
            p_area->ctx.FreeBlockHdrList.tail = BlockHdr;
            break;
        }
        /* Advance both cursors; they stay equal only while blocks remain contiguous and free */
        NextBlockHdr     = NextBlockHdr->next;
        NextFreeBlockHdr = NextFreeBlockHdr->next_free;
    }
}
401 #endif /* gMemManagerLightFreeBlocksCleanUp */
402 
403 #if defined(gMemManagerLightGuardsCheckEnable) && (gMemManagerLightGuardsCheckEnable == 1)
/* Verify that the guard bands surrounding a block header still contain the
 * expected fill patterns; log and assert on any corruption. */
static void MEM_BlockHeaderCheck(blockHeader_t *BlockHdr)
{
    uint8_t expectedPre[BLOCK_HDR_PREGUARD_SIZE];
    uint8_t expectedPost[BLOCK_HDR_POSTGUARD_SIZE];
    int cmp;

    /* Check the guard band located before the header fields */
    (void)memset((void *)expectedPre, BLOCK_HDR_PREGUARD_PATTERN, BLOCK_HDR_PREGUARD_SIZE);
    cmp = memcmp((const void *)&BlockHdr->preguard, (const void *)expectedPre, BLOCK_HDR_PREGUARD_SIZE);
    if (cmp != 0)
    {
        MEM_DBG_LOG("Preguard Block Header Corrupted %x", BlockHdr);
    }
    assert(cmp == 0);

    /* Check the guard band located after the header fields */
    (void)memset((void *)expectedPost, BLOCK_HDR_POSTGUARD_PATTERN, BLOCK_HDR_POSTGUARD_SIZE);
    cmp = memcmp((const void *)&BlockHdr->postguard, (const void *)expectedPost, BLOCK_HDR_POSTGUARD_SIZE);
    if (cmp != 0)
    {
        MEM_DBG_LOG("Postguard Block Header Corrupted %x", BlockHdr);
    }
    assert(cmp == 0);
}
426 
/* Stamp the known fill patterns into both guard bands of a block header so a
 * later MEM_BlockHeaderCheck() can detect out-of-bounds writes. */
static void MEM_BlockHeaderSetGuards(blockHeader_t *BlockHdr)
{
    (void)memset((void *)&BlockHdr->preguard[0], BLOCK_HDR_PREGUARD_PATTERN, sizeof(BlockHdr->preguard));
    (void)memset((void *)&BlockHdr->postguard[0], BLOCK_HDR_POSTGUARD_PATTERN, sizeof(BlockHdr->postguard));
}
432 
433 #endif
434 
MEM_GetAreaByAreaId(uint8_t area_id)435 static memAreaPrivDesc_t *MEM_GetAreaByAreaId(uint8_t area_id)
436 {
437     memAreaPrivDesc_t *p_area = &heap_area_list;
438     for (uint8_t i = 0u; i < area_id; i++)
439     {
440         p_area = (memAreaPrivDesc_t *)(void *)p_area->next;
441     }
442     return p_area;
443 }
444 
445 /*! *********************************************************************************
446 *************************************************************************************
447 * Public functions
448 *************************************************************************************
449 ********************************************************************************** */
450 
451 #if defined(MEM_STATISTICS_INTERNAL)
/* Dump the current allocator statistics (and, in benchmark builds, the timing
 * figures) to the debug log. Purely informational: reads s_memStatis only. */
static void MEM_Reports_memStatis(void)
{
    MEM_DBG_LOG("**************** MEM STATS REPORT **************");
    MEM_DBG_LOG("Nb Alloc:                  %d\r\n", s_memStatis.nb_alloc);
    MEM_DBG_LOG("Small buffers:             %d\r\n", s_memStatis.nb_small_buffer);
    MEM_DBG_LOG("Medium buffers:            %d\r\n", s_memStatis.nb_medium_buffer);
    MEM_DBG_LOG("Large buffers:             %d\r\n", s_memStatis.nb_large_buffer);
    MEM_DBG_LOG("Peak small:                %d\r\n", s_memStatis.peak_small_buffer);
    MEM_DBG_LOG("Peak medium:               %d\r\n", s_memStatis.peak_medium_buffer);
    MEM_DBG_LOG("Peak large:                %d\r\n", s_memStatis.peak_large_buffer);
    MEM_DBG_LOG("Current RAM allocated:     %d bytes\r\n", s_memStatis.ram_allocated);
    MEM_DBG_LOG("Peak RAM allocated:        %d bytes\r\n", s_memStatis.peak_ram_allocated);
    MEM_DBG_LOG("Current RAM lost:          %d bytes\r\n", s_memStatis.ram_lost);
    MEM_DBG_LOG("Peak RAM lost:             %d bytes\r\n", s_memStatis.peak_ram_lost);
    MEM_DBG_LOG("Peak Upper Address:        %x\r\n", s_memStatis.peak_upper_addr);
#ifdef MEM_MANAGER_BENCH
    MEM_DBG_LOG("************************************************\r\n");
    MEM_DBG_LOG("********* MEM MANAGER BENCHMARK REPORT *********\r\n");
    MEM_DBG_LOG("Last Alloc Time:           %d us\r\n", s_memStatis.last_alloc_time);
    MEM_DBG_LOG("Last Alloc Block Size:     %d bytes\r\n", s_memStatis.last_alloc_block_size);
    MEM_DBG_LOG("Last Alloc Buffer Size:    %d bytes\r\n", s_memStatis.last_alloc_buff_size);
    MEM_DBG_LOG("Average Alloc Time:        %d us\r\n", s_memStatis.average_alloc_time);
    MEM_DBG_LOG("Peak Alloc Time:           %d us\r\n", s_memStatis.peak_alloc_time);
#endif /* MEM_MANAGER_BENCH */
    MEM_DBG_LOG("************************************************");
}
478 #endif /* MEM_STATISTICS_INTERNAL */
479 
480 static bool initialized = false;
481 
MEM_RegisterExtendedArea(memAreaCfg_t * area_desc,uint8_t * p_area_id,uint16_t flags)482 mem_status_t MEM_RegisterExtendedArea(memAreaCfg_t *area_desc, uint8_t *p_area_id, uint16_t flags)
483 {
484     mem_status_t st = kStatus_MemSuccess;
485     memAreaPrivDesc_t *p_area;
486     uint32_t regPrimask = DisableGlobalIRQ();
487     assert(offsetof(memAreaCfg_t, internal_ctx) == offsetof(memAreaPrivDesc_t, ctx));
488     assert(sizeof(memAreaCfg_t) >= sizeof(memAreaPrivDesc_t));
489     do
490     {
491         void_ptr_t ptr;
492         blockHeader_t *firstBlockHdr;
493         uint32_t initial_level;
494 
495         if (area_desc == NULL)
496         {
497             assert(flags == 0U);
498             p_area = &heap_area_list;
499             /* Area_desc can only be NULL in the case of the implicit default memHeap registration */
500             if ((p_area->start_address.address_ptr != NULL) || (p_area->end_address.address_ptr != NULL))
501             {
502                 st = kStatus_MemInitError;
503                 break;
504             }
505             /* The head of the area des list is necessarily the main heap */
506             p_area->start_address.address_ptr = &memHeap[0];
507             p_area->end_address.raw_address   = memHeapEnd;
508             assert(p_area->end_address.raw_address > p_area->start_address.raw_address);
509             p_area->next = NULL;
510             if (p_area_id != NULL)
511             {
512                 *p_area_id = 0u;
513             }
514         }
515         else
516         {
517             uint32_t area_sz;
518 
519             memAreaPrivDesc_t *new_area_desc = (memAreaPrivDesc_t *)(void *)area_desc;
520             assert((flags & AREA_FLAGS_RFFU) == 0U);
521             /* Registering an additional area : memHeap nust have been registered beforehand */
522             uint8_t id = 0;
523             if (area_desc->start_address == NULL)
524             {
525                 st = kStatus_MemInitError;
526                 break;
527             }
528             if (heap_area_list.start_address.address_ptr == NULL)
529             {
530                 /* memHeap must have been registered before */
531                 st = kStatus_MemInitError;
532                 break;
533             }
534             area_sz = new_area_desc->end_address.raw_address - new_area_desc->start_address.raw_address;
535             if (area_sz <= (uint32_t)KB((uint32_t)1U))
536             {
537                 /* doesn't make sense to register an area smaller than 1024 bytes */
538                 st = kStatus_MemInitError;
539                 break;
540             }
541 
542             id = 1;
543             for (p_area = &heap_area_list; p_area->next != NULL; p_area = (memAreaPrivDesc_t *)(void *)p_area->next)
544             {
545                 if (p_area == new_area_desc)
546                 {
547                     st = kStatus_MemInitError;
548                     break;
549                 }
550                 id++;
551             }
552             if (st != kStatus_MemSuccess)
553             {
554                 break;
555             }
556             if (p_area_id != NULL)
557             {
558                 /* Determine the rank of the area in the list and return it as area_id */
559                 *p_area_id = id;
560             }
561             p_area->next  = area_desc;     /* p_area still points to previous area desc */
562             p_area        = new_area_desc; /* let p_area point to new element */
563             p_area->flags = flags;
564         }
565         /* Here p_area points either to the implicit memHeap when invoked from MEM_Init or to the
566          * newly appended area configuration descriptor
567          */
568         p_area->next    = NULL;
569         ptr.address_ptr = p_area->start_address.address_ptr;
570         firstBlockHdr   = ptr.block_hdr_ptr;
571 
572         /* MEM_DBG_LOG("%x %d\r\n", memHeap, heapSize_c/sizeof(uint32_t)); */
573 
574         /* Init firstBlockHdr as a free block */
575         firstBlockHdr->next      = NULL;
576         firstBlockHdr->used      = MEMMANAGER_BLOCK_FREE;
577         firstBlockHdr->next_free = NULL;
578         firstBlockHdr->prev_free = NULL;
579 
580 #if defined(MEM_STATISTICS_INTERNAL)
581         firstBlockHdr->buff_size = 0U;
582 #endif
583 
584         /* Init FreeBlockHdrList with firstBlockHdr */
585         p_area->ctx.FreeBlockHdrList.head = firstBlockHdr;
586         p_area->ctx.FreeBlockHdrList.tail = firstBlockHdr;
587         initial_level = p_area->end_address.raw_address - ((uint32_t)firstBlockHdr + BLOCK_HDR_SIZE - 1U);
588 
589         p_area->low_watermark = initial_level;
590 
591 #if defined(gMemManagerLightGuardsCheckEnable) && (gMemManagerLightGuardsCheckEnable == 1)
592         MEM_BlockHeaderSetGuards(firstBlockHdr);
593 #endif
594 
595 #if defined(MEM_STATISTICS_INTERNAL)
596         /* Init memory statistics */
597         MEM_Inits_memStatis(&p_area->ctx.statistics);
598 #endif
599 
600         st = kStatus_MemSuccess;
601     } while (false);
602     ENABLE_GLOBAL_IRQ(regPrimask);
603     return st;
604 }
605 
/* An area is empty when its free list holds a single free block that starts
 * right at the area base address (i.e. nothing is currently allocated). */
static bool MEM_AreaIsEmpty(memAreaPrivDesc_t *p_area)
{
    bool empty = false;
    blockHeader_t *head = p_area->ctx.FreeBlockHdrList.head;

    if ((head == (blockHeader_t *)p_area->start_address.raw_address) && (head->next_free == NULL))
    {
        empty = true;
    }

    return empty;
}
619 
MEM_UnRegisterExtendedArea(uint8_t area_id)620 mem_status_t MEM_UnRegisterExtendedArea(uint8_t area_id)
621 {
622     mem_status_t st = kStatus_MemUnknownError;
623     memAreaPrivDesc_t *prev_area;
624     memAreaPrivDesc_t *p_area_to_remove = NULL;
625     uint32_t regPrimask                 = DisableGlobalIRQ();
626 
627     do
628     {
629         /* Cannot unregister main heap */
630         if (area_id == 0U)
631         {
632             st = kStatus_MemFreeError;
633             break;
634         }
635         prev_area = MEM_GetAreaByAreaId(area_id - 1U); /* Get previous area in list */
636         if (prev_area == NULL)
637         {
638             st = kStatus_MemFreeError;
639             break;
640         }
641 
642         p_area_to_remove = (memAreaPrivDesc_t *)(void *)prev_area->next;
643         if (p_area_to_remove == NULL)
644         {
645             st = kStatus_MemFreeError;
646             break;
647         }
648         if (!MEM_AreaIsEmpty(p_area_to_remove))
649         {
650             st = kStatus_MemFreeError;
651             break;
652         }
653 
654         /* Only unchain if no remaining allocated buffers */
655         prev_area->next        = p_area_to_remove->next;
656         p_area_to_remove->next = NULL;
657 
658         st = kStatus_MemSuccess;
659     } while (false);
660 
661     ENABLE_GLOBAL_IRQ(regPrimask);
662 
663     return st;
664 }
665 
MEM_Init(void)666 mem_status_t MEM_Init(void)
667 {
668     mem_status_t st = kStatus_MemSuccess;
669     uint8_t memHeap_id;
670     if (initialized == false)
671     {
672         initialized = true;
673         st          = MEM_RegisterExtendedArea(NULL, &memHeap_id, 0U); /* initialized default heap area */
674     }
675     return st;
676 }
677 
MEM_BufferAllocateFromArea(memAreaPrivDesc_t * p_area,uint8_t area_id,uint32_t numBytes)678 static void *MEM_BufferAllocateFromArea(memAreaPrivDesc_t *p_area, uint8_t area_id, uint32_t numBytes)
679 {
680     uint32_t regPrimask = DisableGlobalIRQ();
681 
682     blockHeader_t *FreeBlockHdr     = p_area->ctx.FreeBlockHdrList.head;
683     blockHeader_t *NextFreeBlockHdr = FreeBlockHdr->next_free;
684     blockHeader_t *PrevFreeBlockHdr = FreeBlockHdr->prev_free;
685     blockHeader_t *BlockHdrFound    = NULL;
686 
687 #if defined(cMemManagerLightReuseFreeBlocks) && (cMemManagerLightReuseFreeBlocks > 0)
688     blockHeader_t *UsableBlockHdr = NULL;
689 #endif
690     void *buffer = NULL;
691 
692 #ifdef MEM_MANAGER_BENCH
693     uint32_t START_TIME = 0U, STOP_TIME = 0U, ALLOC_TIME = 0U;
694     START_TIME = TM_GetTimestamp();
695 #endif /* MEM_MANAGER_BENCH */
696 
697     do
698     {
699         assert(FreeBlockHdr->used == MEMMANAGER_BLOCK_FREE);
700         if (FreeBlockHdr->next != NULL)
701         {
702             uint32_t available_size;
703             available_size = (uint32_t)FreeBlockHdr->next - (uint32_t)FreeBlockHdr - BLOCK_HDR_SIZE;
704             /* if a next block hdr exists, it means (by design) that a next free block exists too
705                Because the last block header at the end of the heap will always be free
706                So, the current block header can't be the tail, and the next free can't be NULL */
707             assert(FreeBlockHdr < p_area->ctx.FreeBlockHdrList.tail);
708             assert(FreeBlockHdr->next_free != NULL);
709 
710             if (available_size >= numBytes) /* enough space in this free buffer */
711             {
712 #if defined(cMemManagerLightReuseFreeBlocks) && (cMemManagerLightReuseFreeBlocks > 0)
713                 /* this block could be used if the memory pool if full, so we memorize it */
714                 if (UsableBlockHdr == NULL)
715                 {
716                     UsableBlockHdr = FreeBlockHdr;
717                 }
718                 /* To avoid waste of large blocks with small blocks, make sure the required size is big enough for the
719                   available block otherwise, try an other block !
720                   Do not check if available block size is 4 bytes, take the block anyway ! */
721                 if ((available_size <= 4u) ||
722                     ((available_size - numBytes) < (available_size >> cMemManagerLightReuseFreeBlocks)))
723 #endif
724                 {
725                     /* Found a matching free block */
726                     FreeBlockHdr->used    = MEMMANAGER_BLOCK_USED;
727                     FreeBlockHdr->area_id = area_id;
728 #if defined(MEM_STATISTICS_INTERNAL)
729                     FreeBlockHdr->buff_size = (uint16_t)numBytes;
730 #endif
731                     NextFreeBlockHdr = FreeBlockHdr->next_free;
732                     PrevFreeBlockHdr = FreeBlockHdr->prev_free;
733 
734                     /* In the current state, the current block header can be anywhere
735                        from list head to previous block of list tail */
736                     if (p_area->ctx.FreeBlockHdrList.head == FreeBlockHdr)
737                     {
738                         p_area->ctx.FreeBlockHdrList.head = NextFreeBlockHdr;
739                         NextFreeBlockHdr->prev_free       = NULL;
740                     }
741                     else
742                     {
743                         assert(p_area->ctx.FreeBlockHdrList.head->next_free <= FreeBlockHdr);
744 
745                         NextFreeBlockHdr->prev_free = PrevFreeBlockHdr;
746                         PrevFreeBlockHdr->next_free = NextFreeBlockHdr;
747                     }
748 
749                     BlockHdrFound = FreeBlockHdr;
750                     break;
751                 }
752             }
753         }
754         else
755         {
756             /* last block in the heap, check if available space to allocate the block */
757             int32_t available_size;
758             uint32_t total_size;
759             uint32_t current_footprint = (uint32_t)FreeBlockHdr + BLOCK_HDR_SIZE - 1U;
760             int32_t remaining_bytes;
761 
762             /* Current allocation should never be greater than heap end */
763             available_size = (int32_t)p_area->end_address.raw_address - (int32_t)current_footprint;
764             assert(available_size >= 0);
765 
766             assert(FreeBlockHdr == p_area->ctx.FreeBlockHdrList.tail);
767             total_size      = (numBytes + BLOCK_HDR_SIZE);
768             remaining_bytes = available_size - (int32_t)total_size;
769             if (remaining_bytes >= 0) /* need to keep the room for the next BlockHeader */
770             {
771                 if (p_area->low_watermark > (uint32_t)remaining_bytes)
772                 {
773                     p_area->low_watermark = (uint32_t)remaining_bytes;
774                 }
775                 /* Depending on the platform, some RAM banks could need some reinitialization after a low power
776                  * period, such as ECC RAM banks */
777                 MEM_ReinitRamBank((uint32_t)FreeBlockHdr + BLOCK_HDR_SIZE,
778                                   ROUNDUP_WORD(((uint32_t)FreeBlockHdr + total_size + BLOCK_HDR_SIZE)));
779 
780                 FreeBlockHdr->used    = MEMMANAGER_BLOCK_USED;
781                 FreeBlockHdr->area_id = area_id;
782 #if defined(MEM_STATISTICS_INTERNAL)
783                 FreeBlockHdr->buff_size = (uint16_t)numBytes;
784 #endif
785                 FreeBlockHdr->next      = (blockHeader_t *)ROUNDUP_WORD(((uint32_t)FreeBlockHdr + total_size));
786                 FreeBlockHdr->next_free = FreeBlockHdr->next;
787 
788                 PrevFreeBlockHdr = FreeBlockHdr->prev_free;
789 
790                 NextFreeBlockHdr       = FreeBlockHdr->next_free;
791                 NextFreeBlockHdr->used = MEMMANAGER_BLOCK_FREE;
792 #if defined(MEM_STATISTICS_INTERNAL)
793                 NextFreeBlockHdr->buff_size = 0U;
794 #endif
795                 NextFreeBlockHdr->next      = NULL;
796                 NextFreeBlockHdr->next_free = NULL;
797                 NextFreeBlockHdr->prev_free = PrevFreeBlockHdr;
798 
799                 if (p_area->ctx.FreeBlockHdrList.head == FreeBlockHdr)
800                 {
801                     assert(p_area->ctx.FreeBlockHdrList.head == p_area->ctx.FreeBlockHdrList.tail);
802                     assert(PrevFreeBlockHdr == NULL);
803                     /* last free block in heap was the only free block available
804                        so now the first free block in the heap is the next one */
805                     p_area->ctx.FreeBlockHdrList.head = FreeBlockHdr->next_free;
806                 }
807                 else
808                 {
809                     /* update previous free block header to point its next
810                        to the new free block */
811                     PrevFreeBlockHdr->next_free = NextFreeBlockHdr;
812                 }
813 
814                 /* new free block is now the tail of the free block list */
815                 p_area->ctx.FreeBlockHdrList.tail = NextFreeBlockHdr;
816 
817 #if defined(gMemManagerLightGuardsCheckEnable) && (gMemManagerLightGuardsCheckEnable == 1)
818                 MEM_BlockHeaderSetGuards(NextFreeBlockHdr);
819 #endif
820 
821                 BlockHdrFound = FreeBlockHdr;
822             }
823 #if defined(cMemManagerLightReuseFreeBlocks) && (cMemManagerLightReuseFreeBlocks > 0)
824             else if (UsableBlockHdr != NULL)
825             {
826                 /* we found a free block that can be used */
827                 UsableBlockHdr->used    = MEMMANAGER_BLOCK_USED;
828                 UsableBlockHdr->area_id = area_id;
829 #if defined(MEM_STATISTICS_INTERNAL)
830                 UsableBlockHdr->buff_size = (uint16_t)numBytes;
831 #endif
832                 NextFreeBlockHdr = UsableBlockHdr->next_free;
833                 PrevFreeBlockHdr = UsableBlockHdr->prev_free;
834 
835                 /* In the current state, the current block header can be anywhere
836                    from list head to previous block of list tail */
837                 if (p_area->ctx.FreeBlockHdrList.head == UsableBlockHdr)
838                 {
839                     p_area->ctx.FreeBlockHdrList.head = NextFreeBlockHdr;
840                     NextFreeBlockHdr->prev_free       = NULL;
841                 }
842                 else
843                 {
844                     assert(p_area->ctx.FreeBlockHdrList.head->next_free <= UsableBlockHdr);
845 
846                     NextFreeBlockHdr->prev_free = PrevFreeBlockHdr;
847                     PrevFreeBlockHdr->next_free = NextFreeBlockHdr;
848                 }
849                 BlockHdrFound = UsableBlockHdr;
850             }
851 #endif
852             else
853             {
854                 BlockHdrFound = NULL;
855             }
856             break;
857         }
858 #if defined(gMemManagerLightGuardsCheckEnable) && (gMemManagerLightGuardsCheckEnable == 1)
859         MEM_BlockHeaderCheck(FreeBlockHdr->next_free);
860 #endif
861         FreeBlockHdr = FreeBlockHdr->next_free;
862         /* avoid looping */
863         assert(FreeBlockHdr != FreeBlockHdr->next_free);
864     } while (true);
865     /* MEM_DBG_LOG("BlockHdrFound: %x", BlockHdrFound); */
866 
867 #ifdef MEM_DEBUG_OUT_OF_MEMORY
868     assert(BlockHdrFound);
869 #endif
870 
871 #ifdef MEM_MANAGER_BENCH
872     STOP_TIME  = TM_GetTimestamp();
873     ALLOC_TIME = STOP_TIME - START_TIME;
874 #endif /* MEM_MANAGER_BENCH */
875 
876     if (BlockHdrFound != NULL)
877     {
878         void_ptr_t buffer_ptr;
879 #ifdef MEM_TRACKING
880         void_ptr_t lr;
881         lr.raw_address                    = (uint32_t)__mem_get_LR();
882         BlockHdrFound->first_alloc_caller = lr.void_ptr;
883 #endif
884         buffer_ptr.raw_address = (uint32_t)BlockHdrFound + BLOCK_HDR_SIZE;
885         buffer                 = buffer_ptr.void_ptr;
886 #ifdef MEM_STATISTICS_INTERNAL
887 #ifdef MEM_MANAGER_BENCH
888         MEM_BufferAllocates_memStatis(buffer, ALLOC_TIME, numBytes);
889 #else
890         MEM_BufferAllocates_memStatis(buffer, 0, numBytes);
891 #endif
892         if ((p_area->ctx.statistics.nb_alloc % NB_ALLOC_REPORT_THRESHOLD) == 0U)
893         {
894             MEM_Reports_memStatis();
895         }
896 #endif /* MEM_STATISTICS_INTERNAL */
897     }
898     else
899     {
900         /* TODO: Allocation failure try to merge free blocks together  */
901     }
902 
903     EnableGlobalIRQ(regPrimask);
904 
905     return buffer;
906 }
907 
MEM_BufferAllocate(uint32_t numBytes,uint8_t poolId)908 static void *MEM_BufferAllocate(uint32_t numBytes, uint8_t poolId)
909 {
910     memAreaPrivDesc_t *p_area;
911     void *buffer    = NULL;
912     uint8_t area_id = 0U;
913 
914     if (initialized == false)
915     {
916         (void)MEM_Init();
917     }
918     if (poolId == 0U)
919     {
920         area_id = 0U;
921         for (p_area = &heap_area_list; p_area != NULL; p_area = (memAreaPrivDesc_t *)(void *)p_area->next)
922         {
923             if ((p_area->flags & AREA_FLAGS_POOL_NOT_SHARED) == 0U)
924             {
925                 buffer = MEM_BufferAllocateFromArea(p_area, area_id, numBytes);
926                 if (buffer != NULL)
927                 {
928                     break;
929                 }
930             }
931             area_id++;
932         }
933     }
934     else
935     {
936         p_area = MEM_GetAreaByAreaId(poolId); /* Exclusively allocate from targeted pool */
937         if (p_area != NULL)
938         {
939             buffer = MEM_BufferAllocateFromArea(p_area, poolId, numBytes);
940         }
941     }
942     return buffer;
943 }
944 
MEM_BufferAllocWithId(uint32_t numBytes,uint8_t poolId)945 void *MEM_BufferAllocWithId(uint32_t numBytes, uint8_t poolId)
946 {
947 #ifdef MEM_TRACKING
948     void_ptr_t BlockHdr_ptr;
949 #endif
950     void_ptr_t buffer_ptr;
951 
952 #if defined(gFSCI_MemAllocTest_Enabled_d) && (gFSCI_MemAllocTest_Enabled_d)
953     void *pCaller = (void *)((uint32_t *)__mem_get_LR());
954     /* Verify if the caller is part of any FSCI memory allocation test. If so, return NULL. */
955     if (FSCI_MemAllocTestCanAllocate(pCaller) == kStatus_AllocBlock)
956     {
957         buffer_ptr.void_ptr = NULL;
958         return buffer_ptr.void_ptr;
959     }
960 #endif
961 
962     /* Alloc a buffer */
963     buffer_ptr.void_ptr = MEM_BufferAllocate(numBytes, poolId);
964 
965 #ifdef MEM_TRACKING
966     if (buffer_ptr.void_ptr != NULL)
967     {
968         BlockHdr_ptr.raw_address = buffer_ptr.raw_address - BLOCK_HDR_SIZE;
969         /* store caller */
970         BlockHdr_ptr.block_hdr_ptr->second_alloc_caller = (void *)((uint32_t *)__mem_get_LR());
971         ;
972     }
973 #endif
974 
975     return buffer_ptr.void_ptr;
976 }
977 
/*!
 * \brief Returns a buffer to the area it was allocated from.
 *
 * Re-inserts the buffer's block header into the area's address-ordered free
 * block list (it becomes the new list head if it lies before the current
 * head), then marks the block as free.
 *
 * \param p_area area descriptor owning the buffer (resolved by the caller)
 * \param buffer payload pointer previously returned by an allocation
 * \return kStatus_MemSuccess (the only value currently produced)
 */
static mem_status_t MEM_BufferFreeBackToArea(memAreaPrivDesc_t *p_area, void *buffer)
{
    /* union used to convert the payload pointer to a raw address (MISRA) */
    void_ptr_t buffer_ptr;
    buffer_ptr.void_ptr = buffer;
    blockHeader_t *BlockHdr;
    /* the block header sits immediately before the user payload */
    BlockHdr = (blockHeader_t *)(buffer_ptr.raw_address - BLOCK_HDR_SIZE);

    mem_status_t ret = kStatus_MemSuccess;
    /* when allocating a buffer, we always create a FreeBlockHdr at
       the end of the buffer, so the FreeBlockHdrList.tail should always
       be at a higher address than current BlockHdr */
    assert((uint32_t)BlockHdr < (uint32_t)p_area->ctx.FreeBlockHdrList.tail);

#if defined(gMemManagerLightGuardsCheckEnable) && (gMemManagerLightGuardsCheckEnable == 1)
    MEM_BlockHeaderCheck(BlockHdr->next);
#endif

    /* MEM_DBG_LOG("%x %d", BlockHdr, BlockHdr->buff_size); */

#if defined(MEM_STATISTICS_INTERNAL)
    MEM_BufferFrees_memStatis(buffer);
#endif /* MEM_STATISTICS_INTERNAL */

    if ((uint32_t)BlockHdr < (uint32_t)p_area->ctx.FreeBlockHdrList.head)
    {
        /* BlockHdr is placed before FreeBlockHdrList.head so we can set it as
           the new head of the list */
        BlockHdr->next_free                          = p_area->ctx.FreeBlockHdrList.head;
        BlockHdr->prev_free                          = NULL;
        p_area->ctx.FreeBlockHdrList.head->prev_free = BlockHdr;
        p_area->ctx.FreeBlockHdrList.head            = BlockHdr;
    }
    else
    {
        /* we want to find the previous free block header
           here, we cannot use prev_free as this information could be outdated
           so we need to run through the whole list to be sure to catch the
           correct previous free block header */
        blockHeader_t *PrevFreeBlockHdr = p_area->ctx.FreeBlockHdrList.head;
        while ((uint32_t)PrevFreeBlockHdr->next_free < (uint32_t)BlockHdr)
        {
            PrevFreeBlockHdr = PrevFreeBlockHdr->next_free;
        }
        /* insert the new free block in the list */
        BlockHdr->next_free            = PrevFreeBlockHdr->next_free;
        BlockHdr->prev_free            = PrevFreeBlockHdr;
        BlockHdr->next_free->prev_free = BlockHdr;
        PrevFreeBlockHdr->next_free    = BlockHdr;
    }

    /* mark the block as available again */
    BlockHdr->used = MEMMANAGER_BLOCK_FREE;
#if defined(MEM_STATISTICS_INTERNAL)
    BlockHdr->buff_size = 0U;
#endif

#if defined(gMemManagerLightFreeBlocksCleanUp) && (gMemManagerLightFreeBlocksCleanUp != 0)
    /* optionally coalesce adjacent free blocks to limit fragmentation */
    MEM_BufferFreeBlocksCleanUp(p_area, BlockHdr);
#endif
    return ret;
}
1038 
MEM_BufferFree(void * buffer)1039 mem_status_t MEM_BufferFree(void *buffer /* IN: Block of memory to free*/)
1040 {
1041     mem_status_t ret = kStatus_MemSuccess;
1042     void_ptr_t buffer_ptr;
1043     buffer_ptr.void_ptr = buffer;
1044 
1045     if (buffer == NULL)
1046     {
1047         ret = kStatus_MemFreeError;
1048     }
1049     else
1050     {
1051         uint32_t regPrimask = DisableGlobalIRQ();
1052 
1053         blockHeader_t *BlockHdr;
1054         BlockHdr = (blockHeader_t *)(buffer_ptr.raw_address - BLOCK_HDR_SIZE);
1055 
1056         /* assert checks */
1057         assert(BlockHdr->used == MEMMANAGER_BLOCK_USED);
1058         assert(BlockHdr->next != NULL);
1059         memAreaPrivDesc_t *p_area = MEM_GetAreaByAreaId(BlockHdr->area_id);
1060 
1061         if (p_area != NULL)
1062         {
1063             ret = MEM_BufferFreeBackToArea(p_area, buffer);
1064         }
1065         else
1066         {
1067             assert(false);
1068             ret = kStatus_MemFreeError;
1069         }
1070 
1071         EnableGlobalIRQ(regPrimask);
1072     }
1073 
1074     return ret;
1075 }
1076 
MEM_BufferFreeAllWithId(uint8_t poolId)1077 mem_status_t MEM_BufferFreeAllWithId(uint8_t poolId)
1078 {
1079     mem_status_t status = kStatus_MemSuccess;
1080 #if (defined(MEM_TRACK_ALLOC_SOURCE) && (MEM_TRACK_ALLOC_SOURCE == 1))
1081 #ifdef MEMMANAGER_NOT_IMPLEMENTED_YET
1082 
1083 #endif /* MEMMANAGER_NOT_IMPLEMENTED_YET */
1084 #else  /* (defined(MEM_TRACK_ALLOC_SOURCE) && (MEM_TRACK_ALLOC_SOURCE == 1)) */
1085     status = kStatus_MemFreeError;
1086 #endif /* (defined(MEM_TRACK_ALLOC_SOURCE) && (MEM_TRACK_ALLOC_SOURCE == 1)) */
1087     return status;
1088 }
1089 
MEM_GetHeapUpperLimitByAreaId(uint8_t area_id)1090 uint32_t MEM_GetHeapUpperLimitByAreaId(uint8_t area_id)
1091 {
1092     /* There is always a free block at the end of the heap
1093         and this free block is the tail of the list */
1094     uint32_t upper_limit = 0U;
1095     do
1096     {
1097         memAreaPrivDesc_t *p_area;
1098         p_area = MEM_GetAreaByAreaId(area_id);
1099         if (p_area == NULL)
1100         {
1101             break;
1102         }
1103         upper_limit = ((uint32_t)p_area->ctx.FreeBlockHdrList.tail + BLOCK_HDR_SIZE);
1104 
1105     } while (false);
1106 
1107     return upper_limit;
1108 }
1109 
/*!
 * \brief Returns the heap upper limit of the default area (id 0).
 */
uint32_t MEM_GetHeapUpperLimit(void)
{
    uint32_t limit = MEM_GetHeapUpperLimitByAreaId(0U);
    return limit;
}
1114 
MEM_GetFreeHeapSizeLowWaterMarkByAreaId(uint8_t area_id)1115 uint32_t MEM_GetFreeHeapSizeLowWaterMarkByAreaId(uint8_t area_id)
1116 {
1117     uint32_t low_watermark = 0U;
1118     do
1119     {
1120         memAreaPrivDesc_t *p_area;
1121         p_area = MEM_GetAreaByAreaId(area_id);
1122         if (p_area == NULL)
1123         {
1124             break;
1125         }
1126         low_watermark = p_area->low_watermark;
1127 
1128     } while (false);
1129     return low_watermark;
1130 }
1131 
/*!
 * \brief Returns the free-space low watermark of the default area (id 0).
 */
uint32_t MEM_GetFreeHeapSizeLowWaterMark(void)
{
    uint32_t watermark = MEM_GetFreeHeapSizeLowWaterMarkByAreaId(0U);
    return watermark;
}
1136 
MEM_ResetFreeHeapSizeLowWaterMarkByAreaId(uint8_t area_id)1137 uint32_t MEM_ResetFreeHeapSizeLowWaterMarkByAreaId(uint8_t area_id)
1138 {
1139     uint32_t current_level = 0U;
1140     do
1141     {
1142         memAreaPrivDesc_t *p_area;
1143         blockHeader_t *FreeBlockHdr;
1144         uint32_t current_footprint;
1145         p_area = MEM_GetAreaByAreaId(area_id);
1146         if (p_area == NULL)
1147         {
1148             break;
1149         }
1150         FreeBlockHdr      = p_area->ctx.FreeBlockHdrList.head;
1151         current_footprint = (uint32_t)FreeBlockHdr + BLOCK_HDR_SIZE - 1U;
1152 
1153         /* Current allocation should never be greater than heap end */
1154         current_level         = p_area->end_address.raw_address - current_footprint;
1155         p_area->low_watermark = current_level;
1156 
1157     } while (false);
1158     return current_level;
1159 }
1160 
/*!
 * \brief Resets the free-space low watermark of the default area (id 0).
 */
uint32_t MEM_ResetFreeHeapSizeLowWaterMark(void)
{
    uint32_t level = MEM_ResetFreeHeapSizeLowWaterMarkByAreaId(0U);
    return level;
}
1165 
MEM_BufferGetSize(void * buffer)1166 uint16_t MEM_BufferGetSize(void *buffer)
1167 {
1168     blockHeader_t *BlockHdr = NULL;
1169     uint16_t size;
1170     /* union used to fix Misra */
1171     void_ptr_t buffer_ptr;
1172     buffer_ptr.void_ptr = buffer;
1173 
1174     if (buffer != NULL)
1175     {
1176         BlockHdr = (blockHeader_t *)(buffer_ptr.raw_address - BLOCK_HDR_SIZE);
1177         /* block size is the space between current BlockHdr and next BlockHdr */
1178         size = (uint16_t)((uint32_t)BlockHdr->next - (uint32_t)BlockHdr - BLOCK_HDR_SIZE);
1179     }
1180     else
1181     {
1182         /* is case of a NULL buffer, we return 0U */
1183         size = 0U;
1184     }
1185 
1186     return size;
1187 }
1188 
MEM_BufferRealloc(void * buffer,uint32_t new_size)1189 void *MEM_BufferRealloc(void *buffer, uint32_t new_size)
1190 {
1191     void *realloc_buffer = NULL;
1192     uint16_t block_size  = 0U;
1193     do
1194     {
1195         if (new_size >= MAX_UINT16)
1196         {
1197             realloc_buffer = NULL;
1198             /* Bypass he whole procedure so keep original buffer that cannot be reallocated */
1199             break;
1200         }
1201         if (new_size == 0U)
1202         {
1203             /* new requested size is 0, free old buffer */
1204             (void)MEM_BufferFree(buffer);
1205             realloc_buffer = NULL;
1206             break;
1207         }
1208         if (buffer == NULL)
1209         {
1210             /* input buffer is NULL simply allocate a new buffer and return it */
1211             realloc_buffer = MEM_BufferAllocate(new_size, 0U);
1212             break;
1213         }
1214         /* Current buffer needs to be reallocated */
1215         block_size = MEM_BufferGetSize(buffer);
1216 
1217         if ((uint16_t)new_size <= block_size)
1218         {
1219             /* current buffer is large enough for the new requested size
1220                we can still use it */
1221             realloc_buffer = buffer;
1222         }
1223         else
1224         {
1225             /* not enough space in the current block, creating a new one */
1226             realloc_buffer = MEM_BufferAllocate(new_size, 0U);
1227 
1228             if (realloc_buffer != NULL)
1229             {
1230                 /* copy input buffer data to new buffer */
1231                 (void)memcpy(realloc_buffer, buffer, (uint32_t)block_size);
1232 
1233                 /* free old buffer */
1234                 (void)MEM_BufferFree(buffer);
1235             }
1236         }
1237     } while (false);
1238     return realloc_buffer;
1239 }
/*!
 * \brief Computes the total free space available in one area.
 *
 * \param p_area area descriptor to measure
 * \return free space in bytes (free blocks plus trailing untouched space)
 */
static uint32_t MEM_GetFreeHeapSpaceInArea(memAreaPrivDesc_t *p_area)
{
    blockHeader_t *hdr = p_area->ctx.FreeBlockHdrList.head;
    uint32_t total     = 0U;

    /* Sum up the payload of every free block located before the tail. */
    for (; hdr != p_area->ctx.FreeBlockHdrList.tail; hdr = hdr->next_free)
    {
        total += ((uint32_t)hdr->next - (uint32_t)hdr - BLOCK_HDR_SIZE);
    }

    /* Add the untouched space between the tail header and the heap end. */
    total += p_area->end_address.raw_address - (uint32_t)p_area->ctx.FreeBlockHdrList.tail - BLOCK_HDR_SIZE +
             (uint32_t)1U;

    return total;
}
1257 
MEM_GetFreeHeapSizeByAreaId(uint8_t area_id)1258 uint32_t MEM_GetFreeHeapSizeByAreaId(uint8_t area_id)
1259 {
1260     memAreaPrivDesc_t *p_area;
1261     uint32_t free_size = 0U;
1262 
1263     if (area_id == 0U)
1264     {
1265         /* Iterate through all registered areas */
1266         for (p_area = &heap_area_list; p_area != NULL; p_area = (memAreaPrivDesc_t *)(void *)p_area->next)
1267         {
1268             if ((p_area->flags & AREA_FLAGS_POOL_NOT_SHARED) == 0U)
1269             {
1270                 free_size += MEM_GetFreeHeapSpaceInArea(p_area);
1271             }
1272         }
1273     }
1274     else
1275     {
1276         p_area = MEM_GetAreaByAreaId(area_id);
1277         if (p_area != NULL)
1278         {
1279             free_size = MEM_GetFreeHeapSpaceInArea(p_area);
1280         }
1281     }
1282     return free_size;
1283 }
1284 
/*!
 * \brief Returns the free heap space summed over all shared areas.
 */
uint32_t MEM_GetFreeHeapSize(void)
{
    uint32_t free_size = MEM_GetFreeHeapSizeByAreaId(0U);
    return free_size;
}
1289 
/*!
 * \brief Weak hook invoked before reusing heap memory (see the allocation
 *        path), letting platforms reinitialize RAM banks that need it after
 *        a low power period (e.g. ECC RAM) over [startAddress, endAddress].
 *        Default implementation does nothing.
 */
__attribute__((weak)) void MEM_ReinitRamBank(uint32_t startAddress, uint32_t endAddress)
{
    /* To be implemented by the platform */
    (void)startAddress;
    (void)endAddress;
}
1296 
1297 #if 0 /* MISRA C-2012 Rule 8.4 */
1298 uint32_t MEM_GetAvailableBlocks(uint32_t size)
1299 {
1300     /* Function not implemented yet */
1301     assert(0);
1302 
1303     return 0U;
1304 }
1305 #endif
1306 
/*!
 * \brief calloc() replacement: allocates len * val bytes and zero-fills them.
 *
 * \param len number of elements
 * \param val size of one element in bytes
 * \return zeroed buffer on success, NULL on failure or size overflow
 */
void *MEM_CallocAlt(size_t len, size_t val)
{
    size_t blk_size;

    /* Guard against multiplication wrap-around (CERT INT30-C): an overflowed
       product would silently allocate a smaller buffer than requested. */
    if ((val != 0U) && (len > (SIZE_MAX / val)))
    {
        return NULL;
    }

    blk_size = len * val;

    void *pData = MEM_BufferAllocate(blk_size, 0U);
    if (NULL != pData)
    {
        /* calloc semantics: the returned memory is zero-initialized */
        (void)memset(pData, 0, blk_size);
    }

    return pData;
}
1321 
1322 #if 0 /* MISRA C-2012 Rule 8.4 */
1323 void MEM_FreeAlt(void *pData)
1324 {
1325     /* Function not implemented yet */
1326     assert(0);
1327 }
1328 #endif
1329 
1330 #endif
1331