/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "heap_private.h"
#include <assert.h>
#include <string.h>
#include <sys/lock.h>

#include "esp_log.h"
#include "multi_heap.h"
#include "multi_heap_platform.h"
#include "esp_heap_caps_init.h"
#include "heap_memory_layout.h"

static const char *TAG = "heap_init";

/* Linked-list of registered heaps */
struct registered_heap_ll registered_heaps;

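/* Register a single heap region with the multi_heap allocator. On success,
   region->heap holds the new heap handle; if multi_heap_register() fails
   (e.g. the region is too small to hold the heap metadata), it is left NULL. */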
static void register_heap(heap_t *region)
{
    size_t heap_size = region->end - region->start;
    assert(heap_size <= HEAP_SIZE_MAX);
    region->heap = multi_heap_register((void *)region->start, heap_size);
    if (region->heap != NULL) {
        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
    }
}

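/* Called once the OS scheduler is running: registers any heaps that were
   skipped at startup because their memory was still in use as the pre-OS
   (startup) stack. */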
void heap_caps_enable_nonos_stack_heaps(void)
{
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        // Assume any not-yet-registered heap is
        // a nonos-stack heap
        if (heap->heap == NULL) {
            register_heap(heap);
            if (heap->heap != NULL) {
                multi_heap_set_lock(heap->heap, &heap->heap_mux);
            }
        }
    }
}

/* Initialize the heap allocator to use all of the memory not
   used by static data or reserved for other purposes
 */
void heap_caps_init(void)
{
#ifdef CONFIG_HEAP_TLSF_USE_ROM_IMPL
    extern void multi_heap_in_rom_init(void);
    multi_heap_in_rom_init();
#endif
    /* Get the array of regions that we can use for heaps
       (with reserved memory removed already.)
     */
    size_t num_regions = soc_get_available_memory_region_max_count();
    soc_memory_region_t regions[num_regions];
    num_regions = soc_get_available_memory_regions(regions);
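    // num_regions now reflects how many entries were actually populated,
    // which may be fewer than the maximum count used to size the array above.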

    // The loop below determines the final number of heaps by
    // subtracting one for each pair of regions that gets coalesced.
    size_t num_heaps = num_regions;

    //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
    //it's useful to coalesce adjacent regions that have the same type.
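    // For example (hypothetical addresses): a region at 0x3FFB0000 of size
    // 0x10000 followed by a same-type region starting at 0x3FFC0000 of size
    // 0x8000 collapses into one 0x18000-byte region, so one fewer heap is created.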
    for (size_t i = 1; i < num_regions; i++) {
        soc_memory_region_t *a = &regions[i - 1];
        soc_memory_region_t *b = &regions[i];
        if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type) {
            a->type = -1;
            b->start = a->start;
            b->size += a->size;

            // remove one heap from the count, as
            // two regions were just coalesced
            num_heaps--;
        }
    }

    /* Start by allocating the registered heap data on the stack.

       Once we have a heap to copy it to, we will copy it to a heap buffer.
    */
    heap_t temp_heaps[num_heaps];
    size_t heap_idx = 0;

    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
    for (size_t i = 0; i < num_regions; i++) {
        soc_memory_region_t *region = &regions[i];
        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
        heap_t *heap = &temp_heaps[heap_idx];
        if (region->type == -1) {
            continue;
        }
        heap_idx++;
        assert(heap_idx <= num_heaps);

        memcpy(heap->caps, type->caps, sizeof(heap->caps));
        heap->start = region->start;
        heap->end = region->start + region->size;
        MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
        if (type->startup_stack) {
            /* Will be registered when OS scheduler starts */
            heap->heap = NULL;
        } else {
            register_heap(heap);
        }
        SLIST_NEXT(heap, next) = NULL;

        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
                       region->start, region->size, region->size / 1024, type->name);
    }

    assert(heap_idx == num_heaps);

    /* Allocate the permanent heap data that we'll use as a linked list at runtime.

       Allocate this part of data contiguously, even though it's a linked list... */
    assert(SLIST_EMPTY(&registered_heaps));

    heap_t *heaps_array = NULL;
    for (size_t i = 0; i < num_heaps; i++) {
        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
            /* use the first DRAM heap which can fit the data */
            heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
            if (heaps_array != NULL) {
                break;
            }
        }
    }
    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */

    memcpy(heaps_array, temp_heaps, sizeof(heap_t) * num_heaps);

    /* Iterate the heaps and set their locks, also add them to the linked list. */
    for (size_t i = 0; i < num_heaps; i++) {
        if (heaps_array[i].heap != NULL) {
            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
        }
        if (i == 0) {
            SLIST_INSERT_HEAD(&registered_heaps, &heaps_array[0], next);
        } else {
            SLIST_INSERT_AFTER(&heaps_array[i-1], &heaps_array[i], next);
        }
    }
}

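/* Add a memory region to the heap allocator at runtime, inheriting the
   capabilities of the soc memory region that contains 'start'.

   Example usage (hypothetical addresses), handing a 64 KiB range back to
   the allocator:

       if (heap_caps_add_region(0x3FFB0000, 0x3FFC0000) != ESP_OK) {
           // range rejected: outside any known memory region, or it
           // overlaps an already-registered heap
       }
 */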
esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
{
    if (start == 0) {
        return ESP_ERR_INVALID_ARG;
    }

    for (size_t i = 0; i < soc_memory_region_count; i++) {
        const soc_memory_region_t *region = &soc_memory_regions[i];
        // Test requested start only as 'end' may be in a different region entry, assume 'end' has same caps
        if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
            const uint32_t *caps = soc_memory_types[region->type].caps;
            return heap_caps_add_region_with_caps(caps, start, end);
        }
    }

    return ESP_ERR_NOT_FOUND;
}

/* This API is used for internal test purposes and hence is not marked as static */
bool heap_caps_check_add_region_allowed(intptr_t heap_start, intptr_t heap_end, intptr_t start, intptr_t end)
{
    /*
     *  We assume that within any region, "start" is strictly less than "end".
     *  Notably, the 3rd scenario below is allowed: for example, memory can be
     *  allocated from a heap, its capabilities changed, and this function
     *  called to create a new region for a special application.
     *  The 'start == heap_start' and 'end == heap_end' scenario (case 6) is
     *  rejected because the same region cannot be added twice; registering
     *  the same memory region as a heap twice would corrupt the heap metadata
     *  and cause an exception at runtime.
     *
     *  the existing heap region                                  s(tart)                e(nd)
     *                                                            |----------------------|
     *
     *  1. add region (e1 < s)                              |-----|                                      allowed:  bool condition_1 = end < heap_start;
     *
     *  2. add region (s2 < s && e2 > s)                    |-----------------|                          rejected: bool condition_2 = start < heap_start && end > heap_start;
     *                                                      |---------------------------------|          rejected
     *
     *  3. add region (s3 >= s && e3 < e)                         |---------------|                      allowed:  bool condition_3 = start >= heap_start && end < heap_end;
     *                                                                  |--------------|                 allowed
     *
     *  4. add region (s4 < e && e4 > e)                          |------------------------|             rejected: bool condition_4 = start < heap_end && end > heap_end;
     *                                                                  |---------------------|          rejected
     *
     *  5. add region (s5 >= e)                                                          |----|          allowed:  bool condition_5 = start >= heap_end;
     *
     *  6. add region (s6 == s && e6 == e)                        |----------------------|               rejected: bool condition_6 = start == heap_start && end == heap_end;
     */

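    /* For example (hypothetical addresses), with an existing heap spanning
       [0x1000, 0x2000):
         heap_caps_check_add_region_allowed(0x1000, 0x2000, 0x0800, 0x0F00) -> true   (case 1)
         heap_caps_check_add_region_allowed(0x1000, 0x2000, 0x0800, 0x1800) -> false  (case 2)
         heap_caps_check_add_region_allowed(0x1000, 0x2000, 0x1200, 0x1800) -> true   (case 3)
         heap_caps_check_add_region_allowed(0x1000, 0x2000, 0x1800, 0x2800) -> false  (case 4)
         heap_caps_check_add_region_allowed(0x1000, 0x2000, 0x2000, 0x2800) -> true   (case 5)
         heap_caps_check_add_region_allowed(0x1000, 0x2000, 0x1000, 0x2000) -> false  (case 6) */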
    bool condition_2 = start < heap_start && end > heap_start;        // if true then region not allowed
    bool condition_4 = start < heap_end && end > heap_end;            // if true then region not allowed
    bool condition_6 = start == heap_start && end == heap_end;        // if true then region not allowed

    return !(condition_2 || condition_4 || condition_6);
}

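/* Add a memory region with explicit capabilities. Rejects NULL caps, zero or
   inverted bounds, and any range that overlaps the start or end of (or
   exactly matches) an already-registered heap. */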
esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
{
    esp_err_t err = ESP_FAIL;
    if (caps == NULL || start == 0 || end == 0 || end <= start) {
        return ESP_ERR_INVALID_ARG;
    }

    //Check if region overlaps the start and/or end of an existing region. If so, the
    //region is invalid (or maybe added twice)
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        if (!heap_caps_check_add_region_allowed(heap->start, heap->end, start, end)) {
            ESP_EARLY_LOGD(TAG, "invalid overlap detected with existing heap region");
            return ESP_FAIL;
        }
    }

    heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (p_new == NULL) {
        err = ESP_ERR_NO_MEM;
        goto done;
    }
    memcpy(p_new->caps, caps, sizeof(p_new->caps));
    p_new->start = start;
    p_new->end = end;
    MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
    p_new->heap = multi_heap_register((void *)start, end - start);
    SLIST_NEXT(p_new, next) = NULL;
    if (p_new->heap == NULL) {
        err = ESP_ERR_INVALID_SIZE;
        goto done;
    }
    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);

    /* This insertion is atomic with respect to registered_heaps, so
       we don't need to worry about thread safety for readers,
       only for writers. */
    static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
    MULTI_HEAP_LOCK(&registered_heaps_write_lock);
    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
    MULTI_HEAP_UNLOCK(&registered_heaps_write_lock);

    err = ESP_OK;

done:
    if (err != ESP_OK) {
        free(p_new);
    }
    return err;
}