// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "heap_private.h"
#include <assert.h>
#include <string.h>
#include <sys/lock.h>

#include "esp_log.h"
#include "multi_heap.h"
#include "multi_heap_platform.h"
#include "esp_heap_caps_init.h"
#include "soc/soc_memory_layout.h"

static const char *TAG = "heap_init";

/* Linked-list of registered heaps */
struct registered_heap_ll registered_heaps;

static void register_heap(heap_t *region)
{
    size_t heap_size = region->end - region->start;
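    /* A single heap region must not be larger than HEAP_SIZE_MAX */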
    assert(heap_size <= HEAP_SIZE_MAX);
    region->heap = multi_heap_register((void *)region->start, heap_size);
    if (region->heap != NULL) {
        ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
    }
}

void heap_caps_enable_nonos_stack_heaps(void)
{
    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        // Assume any not-yet-registered heap is
        // a nonos-stack heap
        if (heap->heap == NULL) {
            register_heap(heap);
            if (heap->heap != NULL) {
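                /* Give the newly registered heap a lock so allocations are thread-safe */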
                multi_heap_set_lock(heap->heap, &heap->heap_mux);
            }
        }
    }
}

/* Initialize the heap allocator to use all of the memory not
   used by static data or reserved for other purposes
 */
void heap_caps_init(void)
{
    /* Get the array of regions that we can use for heaps
       (with reserved memory removed already.)
     */
    size_t num_regions = soc_get_available_memory_region_max_count();
    soc_memory_region_t regions[num_regions];
    num_regions = soc_get_available_memory_regions(regions);

    //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
    //it's useful to coalesce adjacent regions that have the same type.
    for (size_t i = 1; i < num_regions; i++) {
        soc_memory_region_t *a = &regions[i - 1];
        soc_memory_region_t *b = &regions[i];
        if (b->start == (intptr_t)(a->start + a->size) && b->type == a->type) {
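            // Merge region 'a' into 'b' and mark 'a' as unusable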
            a->type = -1;
            b->start = a->start;
            b->size += a->size;
        }
    }

    /* Count the heaps left after merging */
    size_t num_heaps = 0;
    for (size_t i = 0; i < num_regions; i++) {
        if (regions[i].type != -1) {
            num_heaps++;
        }
    }

    /* Start by allocating the registered heap data on the stack.

       Once we have a heap to copy it to, we will copy it to a heap buffer.
    */
    heap_t temp_heaps[num_heaps];
    size_t heap_idx = 0;

    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
    for (size_t i = 0; i < num_regions; i++) {
        soc_memory_region_t *region = &regions[i];
        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
        heap_t *heap = &temp_heaps[heap_idx];
        if (region->type == -1) {
            continue;
        }
        heap_idx++;
        assert(heap_idx <= num_heaps);

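        /* Record this region's capabilities and bounds in the temporary heap descriptor */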
        memcpy(heap->caps, type->caps, sizeof(heap->caps));
        heap->start = region->start;
        heap->end = region->start + region->size;
        MULTI_HEAP_LOCK_INIT(&heap->heap_mux);
        if (type->startup_stack) {
            /* Will be registered when OS scheduler starts */
            heap->heap = NULL;
        } else {
            register_heap(heap);
        }
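        /* List linkage is fixed up later, once the descriptors are copied into their permanent buffer */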
        SLIST_NEXT(heap, next) = NULL;

        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
                       region->start, region->size, region->size / 1024, type->name);
    }

    assert(heap_idx == num_heaps);

    /* Allocate the permanent heap data that we'll use as a linked list at runtime.

       Allocate this data as one contiguous block, even though it's used as a linked list... */
    assert(SLIST_EMPTY(&registered_heaps));

    heap_t *heaps_array = NULL;
    for (size_t i = 0; i < num_heaps; i++) {
        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
            /* use the first DRAM heap which can fit the data */
            heaps_array = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_heaps);
            if (heaps_array != NULL) {
                break;
            }
        }
    }
    assert(heaps_array != NULL); /* if NULL, there's not enough free startup heap space */

    memcpy(heaps_array, temp_heaps, sizeof(heap_t) * num_heaps);

    /* Iterate over the heaps, setting their locks and adding them to the linked list. */
    for (size_t i = 0; i < num_heaps; i++) {
        if (heaps_array[i].heap != NULL) {
            multi_heap_set_lock(heaps_array[i].heap, &heaps_array[i].heap_mux);
        }
        if (i == 0) {
            SLIST_INSERT_HEAD(&registered_heaps, &heaps_array[0], next);
        } else {
            SLIST_INSERT_AFTER(&heaps_array[i-1], &heaps_array[i], next);
        }
    }
}

esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
{
    if (start == 0) {
        return ESP_ERR_INVALID_ARG;
    }

    for (size_t i = 0; i < soc_memory_region_count; i++) {
        const soc_memory_region_t *region = &soc_memory_regions[i];
        // Test requested start only as 'end' may be in a different region entry, assume 'end' has same caps
        if (region->start <= start && (intptr_t)(region->start + region->size) > start) {
            const uint32_t *caps = soc_memory_types[region->type].caps;
            return heap_caps_add_region_with_caps(caps, start, end);
        }
    }

    return ESP_ERR_NOT_FOUND;
}

esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
{
    esp_err_t err = ESP_FAIL;
    if (caps == NULL || start == 0 || end == 0 || end <= start) {
        return ESP_ERR_INVALID_ARG;
    }

    //Check if region overlaps the start and/or end of an existing region. If so, the
    //region is invalid (or maybe added twice)
    /*
     *  Within any region, 'start' must be strictly less than 'end' (they can never be equal).
     *  The 4th scenario below is explicitly allowed: for example, memory may be allocated from a heap,
     *  its capability changed, and this function called to create a new region for a special
     *  application.
     *  In the chart below, the case 'start == s' and 'end == e' falls under the 3rd scenario.
     *  That all-equal case is rejected because the same region cannot be added twice; for example,
     *  adding the .bss memory as a region twice would cause an exception if this check were skipped.
     *
     *  the existing heap region                                  s(tart)                e(nd)
     *                                                            |----------------------|
     *  1.add region  [Correct]   (s1<s && e1<=s)           |-----|
     *  2.add region  [Incorrect] (s2<=s && s<e2<=e)        |---------------|
     *  3.add region  [Incorrect] (s3<=s && e<e3)           |-------------------------------------|
     *  4.add region  [Correct]   (s<s4<e && s<e4<=e)                  |-------|
     *  5.add region  [Incorrect] (s<s5<e && e<e5)                     |----------------------------|
     *  6.add region  [Correct]   (e<=s6 && e<e6)                                        |----|
     */

    heap_t *heap;
    SLIST_FOREACH(heap, &registered_heaps, next) {
        if ((start <= heap->start && end > heap->start)
                || (start < heap->end && end > heap->end)) {
            return ESP_FAIL;
        }
    }

    heap_t *p_new = heap_caps_malloc(sizeof(heap_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (p_new == NULL) {
        err = ESP_ERR_NO_MEM;
        goto done;
    }
    memcpy(p_new->caps, caps, sizeof(p_new->caps));
    p_new->start = start;
    p_new->end = end;
    MULTI_HEAP_LOCK_INIT(&p_new->heap_mux);
    p_new->heap = multi_heap_register((void *)start, end - start);
    SLIST_NEXT(p_new, next) = NULL;
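    /* Registration can fail (return NULL), e.g. if the region is too small to hold heap metadata */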
    if (p_new->heap == NULL) {
        err = ESP_ERR_INVALID_SIZE;
        goto done;
    }
    multi_heap_set_lock(p_new->heap, &p_new->heap_mux);

    /* Inserting at the head of the list is atomic with respect to readers
       of registered_heaps, so we only need to guard against concurrent
       writers. */
    static multi_heap_lock_t registered_heaps_write_lock = MULTI_HEAP_LOCK_STATIC_INITIALIZER;
    MULTI_HEAP_LOCK(&registered_heaps_write_lock);
    SLIST_INSERT_HEAD(&registered_heaps, p_new, next);
    MULTI_HEAP_UNLOCK(&registered_heaps_write_lock);

    err = ESP_OK;

 done:
    if (err != ESP_OK) {
        free(p_new);
    }
    return err;
}