1 /**
2  * @file lv_malloc_core.c
3  */
4 
5 /*********************
6  *      INCLUDES
7  *********************/
8 #include "../lv_mem.h"
9 #if LV_USE_STDLIB_MALLOC == LV_STDLIB_BUILTIN
10 
11 #include "lv_tlsf.h"
12 #include "../lv_string.h"
13 #include "../../misc/lv_assert.h"
14 #include "../../misc/lv_log.h"
15 #include "../../misc/lv_ll.h"
16 #include "../../misc/lv_math.h"
17 #include "../../osal/lv_os.h"
18 #include "../../core/lv_global.h"
19 
20 #ifdef LV_MEM_POOL_INCLUDE
21     #include LV_MEM_POOL_INCLUDE
22 #endif
23 
24 /*********************
25  *      DEFINES
26  *********************/
27 /*memset the allocated memories to 0xaa and freed memories to 0xbb (just for testing purposes)*/
28 #ifndef LV_MEM_ADD_JUNK
29     #define LV_MEM_ADD_JUNK  0
30 #endif
31 
32 #ifdef LV_ARCH_64
33     #define MEM_UNIT         uint64_t
34     #define ALIGN_MASK       0x7
35 #else
36     #define MEM_UNIT         uint32_t
37     #define ALIGN_MASK       0x3
38 #endif
39 #define state LV_GLOBAL_DEFAULT()->tlsf_state
40 
41 /**********************
42  *      TYPEDEFS
43  **********************/
44 
45 /**********************
46  *  STATIC PROTOTYPES
47  **********************/
48 static void lv_mem_walker(void * ptr, size_t size, int used, void * user);
49 
50 /**********************
51  *  STATIC VARIABLES
52  **********************/
53 
54 /**********************
55  *      MACROS
56  **********************/
57 #if LV_USE_LOG && LV_LOG_TRACE_MEM
58     #define LV_TRACE_MEM(...) LV_LOG_TRACE(__VA_ARGS__)
59 #else
60     #define LV_TRACE_MEM(...)
61 #endif
62 
63 #define _COPY(d, s) *d = *s; d++; s++;
64 #define _SET(d, v) *d = v; d++;
65 #define _REPEAT8(expr) expr expr expr expr expr expr expr expr
66 
67 /**********************
68  *   GLOBAL FUNCTIONS
69  **********************/
70 
/**
 * Initialize the built-in memory manager.
 * Creates the TLSF instance over the configured pool, initializes the
 * linked list used to track all pools, and registers the first pool in it.
 * Must be called before any lv_malloc/lv_free.
 */
void lv_mem_init(void)
{
#if LV_USE_OS
    /*Serialize allocator access when an OS is used*/
    lv_mutex_init(&state.mutex);
#endif

#if LV_MEM_ADR == 0
#ifdef LV_MEM_POOL_ALLOC
    /*Obtain the pool from a user-supplied allocator macro
     *(e.g. a platform-specific heap allocation function)*/
    state.tlsf = lv_tlsf_create_with_pool((void *)LV_MEM_POOL_ALLOC(LV_MEM_SIZE), LV_MEM_SIZE);
#else
    /*Allocate a large array to store the dynamically allocated data*/
    static LV_ATTRIBUTE_LARGE_RAM_ARRAY MEM_UNIT work_mem_int[LV_MEM_SIZE / sizeof(MEM_UNIT)];
    state.tlsf = lv_tlsf_create_with_pool((void *)work_mem_int, LV_MEM_SIZE);
#endif
#else
    /*Use the fixed address given in the configuration (e.g. external RAM)*/
    state.tlsf = lv_tlsf_create_with_pool((void *)LV_MEM_ADR, LV_MEM_SIZE);
#endif

    /*The pool list nodes themselves are allocated from the TLSF heap*/
    lv_ll_init(&state.pool_ll, sizeof(lv_pool_t));

    /*Record the first pool*/
    lv_pool_t * pool_p = lv_ll_ins_tail(&state.pool_ll);
    LV_ASSERT_MALLOC(pool_p);
    *pool_p = lv_tlsf_get_pool(state.tlsf);

#if LV_MEM_ADD_JUNK
    LV_LOG_WARN("LV_MEM_ADD_JUNK is enabled which makes LVGL much slower");
#endif
}
100 
/**
 * Tear down the memory manager.
 * Order matters: the pool-list nodes were allocated from the TLSF heap,
 * so the list must be cleared before the heap itself is destroyed.
 */
void lv_mem_deinit(void)
{
    lv_ll_clear(&state.pool_ll);
    lv_tlsf_destroy(state.tlsf);
#if LV_USE_OS
    lv_mutex_delete(&state.mutex);
#endif
}
109 
lv_mem_add_pool(void * mem,size_t bytes)110 lv_mem_pool_t lv_mem_add_pool(void * mem, size_t bytes)
111 {
112     lv_mem_pool_t new_pool = lv_tlsf_add_pool(state.tlsf, mem, bytes);
113     if(!new_pool) {
114         LV_LOG_WARN("failed to add memory pool, address: %p, size: %zu", mem, bytes);
115         return NULL;
116     }
117 
118     lv_pool_t * pool_p = lv_ll_ins_tail(&state.pool_ll);
119     LV_ASSERT_MALLOC(pool_p);
120     *pool_p = new_pool;
121 
122     return new_pool;
123 }
124 
/**
 * Detach a previously added memory pool.
 * Looks the pool up in the bookkeeping list; logs a warning if it was
 * never registered.
 * @param pool  handle returned by lv_mem_add_pool()
 */
void lv_mem_remove_pool(lv_mem_pool_t pool)
{
    lv_pool_t * node;
    LV_LL_READ(&state.pool_ll, node) {
        if(*node != pool) continue;

        /*Drop the bookkeeping entry, then the pool itself*/
        lv_ll_remove(&state.pool_ll, node);
        lv_free(node);
        lv_tlsf_remove_pool(state.tlsf, pool);
        return;
    }

    LV_LOG_WARN("invalid pool: %p", pool);
}
138 
/**
 * Core allocation routine backing lv_malloc().
 * Thread-safe when LV_USE_OS is enabled.
 * @param size  number of bytes to allocate
 * @return      pointer to the allocated memory, or NULL on failure
 */
void * lv_malloc_core(size_t size)
{
#if LV_USE_OS
    lv_mutex_lock(&state.mutex);
#endif

    void * alloc = lv_tlsf_malloc(state.tlsf, size);

    if(alloc != NULL) {
        /*Account for the full block size reported by TLSF, and track the peak*/
        state.cur_used += lv_tlsf_block_size(alloc);
        if(state.max_used < state.cur_used) state.max_used = state.cur_used;
    }

#if LV_USE_OS
    lv_mutex_unlock(&state.mutex);
#endif

    return alloc;
}
156 
/**
 * Core reallocation routine backing lv_realloc().
 * Updates the usage counters with the delta between the old and new block.
 * @param p         pointer to resize (NULL behaves like malloc in TLSF)
 * @param new_size  requested new size in bytes
 * @return          pointer to the resized memory, or NULL on failure
 */
void * lv_realloc_core(void * p, size_t new_size)
{
#if LV_USE_OS
    lv_mutex_lock(&state.mutex);
#endif

    /*Capture the old block size before TLSF recycles the block*/
    size_t prev_size = lv_tlsf_block_size(p);
    void * reallocated = lv_tlsf_realloc(state.tlsf, p, new_size);

    if(reallocated != NULL) {
        state.cur_used = state.cur_used - prev_size + lv_tlsf_block_size(reallocated);
        if(state.max_used < state.cur_used) state.max_used = state.cur_used;
    }

#if LV_USE_OS
    lv_mutex_unlock(&state.mutex);
#endif

    return reallocated;
}
177 
lv_free_core(void * p)178 void lv_free_core(void * p)
179 {
180 #if LV_USE_OS
181     lv_mutex_lock(&state.mutex);
182 #endif
183 
184 #if LV_MEM_ADD_JUNK
185     lv_memset(p, 0xbb, lv_tlsf_block_size(data));
186 #endif
187     size_t size = lv_tlsf_block_size(p);
188     lv_tlsf_free(state.tlsf, p);
189     if(state.cur_used > size) state.cur_used -= size;
190     else state.cur_used = 0;
191 
192 #if LV_USE_OS
193     lv_mutex_unlock(&state.mutex);
194 #endif
195 }
196 
/**
 * Fill a monitor structure with heap statistics.
 * Walks every registered pool and derives usage and fragmentation
 * percentages from the accumulated counters.
 * @param mon_p  output structure; zeroed first, then populated
 */
void lv_mem_monitor_core(lv_mem_monitor_t * mon_p)
{
    /*Init the data*/
    lv_memzero(mon_p, sizeof(lv_mem_monitor_t));
    LV_TRACE_MEM("begin");

    lv_pool_t * pool_p;
    LV_LL_READ(&state.pool_ll, pool_p) {
        lv_tlsf_walk_pool(*pool_p, lv_mem_walker, mon_p);
    }

    /*Guard against division by zero when no pool was walked
     *(e.g. monitor called before lv_mem_init or after all pools were removed)*/
    if(mon_p->total_size > 0) {
        mon_p->used_pct = 100 - (uint64_t)100U * mon_p->free_size / mon_p->total_size;
    }
    else {
        mon_p->used_pct = 0;
    }

    if(mon_p->free_size > 0) {
        mon_p->frag_pct = (uint64_t)mon_p->free_biggest_size * 100U / mon_p->free_size;
        mon_p->frag_pct = 100 - mon_p->frag_pct;
    }
    else {
        mon_p->frag_pct = 0; /*no fragmentation if all the RAM is used*/
    }

    mon_p->max_used = state.max_used;

    LV_TRACE_MEM("finished");
}
221 
lv_mem_test_core(void)222 lv_result_t lv_mem_test_core(void)
223 {
224 #if LV_USE_OS
225     lv_mutex_lock(&state.mutex);
226 #endif
227     if(lv_tlsf_check(state.tlsf)) {
228         LV_LOG_WARN("failed");
229 #if LV_USE_OS
230         lv_mutex_unlock(&state.mutex);
231 #endif
232         return LV_RESULT_INVALID;
233     }
234 
235     lv_pool_t * pool_p;
236     LV_LL_READ(&state.pool_ll, pool_p) {
237         if(lv_tlsf_check_pool(*pool_p)) {
238             LV_LOG_WARN("pool failed");
239 #if LV_USE_OS
240             lv_mutex_unlock(&state.mutex);
241 #endif
242             return LV_RESULT_INVALID;
243         }
244     }
245 
246     LV_TRACE_MEM("passed");
247 #if LV_USE_OS
248     lv_mutex_unlock(&state.mutex);
249 #endif
250     return LV_RESULT_OK;
251 }
252 
253 /**********************
254  *   STATIC FUNCTIONS
255  **********************/
256 
/**
 * Callback for lv_tlsf_walk_pool(): accumulates per-block statistics
 * into the lv_mem_monitor_t passed via `user`.
 * @param ptr   block address (unused)
 * @param size  block size in bytes
 * @param used  non-zero if the block is allocated
 * @param user  lv_mem_monitor_t* accumulator
 */
static void lv_mem_walker(void * ptr, size_t size, int used, void * user)
{
    LV_UNUSED(ptr);

    lv_mem_monitor_t * mon = (lv_mem_monitor_t *)user;
    mon->total_size += size;

    if(!used) {
        /*Free block: count it and track the largest one for fragmentation stats*/
        mon->free_cnt++;
        mon->free_size += size;
        if(mon->free_biggest_size < size) mon->free_biggest_size = size;
    }
    else {
        mon->used_cnt++;
    }
}
273 #endif /*LV_STDLIB_BUILTIN*/
274