/*
 * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_heap_caps.h"

#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"

#include "esp_private/cache_utils.h"
#include "esp_private/esp_cache_esp32_private.h"
#include "esp_private/esp_mmu_map_private.h"
#include "ext_mem_layout.h"
#include "esp_mmu_map.h"

//Align a size up to the given alignment (alignment must be a power of two)
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
//Align a vaddr down to the given alignment (alignment must be a power of two)
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))
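/**
 * A minimal worked example of the two helpers above (illustration only,
 * assuming a 64 KB MMU page, i.e. CONFIG_MMU_PAGE_SIZE == 0x10000):
 *
 *   ALIGN_UP_BY(0x12345, 0x10000)   == 0x20000  //size rounded up to the next page boundary
 *   ALIGN_DOWN_BY(0x12345, 0x10000) == 0x10000  //vaddr rounded down to the enclosing page boundary
 */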

//This flag indicates that a memory region has been merged into its neighbour; we no longer care about it
#define MEM_REGION_MERGED             -1

/**
 * We have some HW-related tests for vaddr region capabilities.
 * Use this macro to disable the paddr check in those tests, as they need to reuse certain paddr blocks.
 */
#define ENABLE_PADDR_CHECK            !ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR

static DRAM_ATTR const char *TAG = "mmap";

/**
 * @brief MMU Memory Mapping Driver
 *
 * Driver Backgrounds:
 *
 * --------------------------------------------------------------------------------------------------------
 *                                            Memory Pool                                                 |
 * --------------------------------------------------------------------------------------------------------
 * |                       Memory Region 0                              | Memory Region 1 |     ...       |
 * --------------------------------------------------------------------------------------------------------
 * | Block 0 | Slot 0 | Block 1 | Block 2 |  ...  | Slot 1 (final slot) |          ...                    |
 * --------------------------------------------------------------------------------------------------------
 *
 * - A block is a piece of vaddr range that is dynamically mapped. Blocks are doubly linked:
 *   Block 0 <-> Block 1 <-> Block 2
 * - A Slot is the vaddr range between 2 blocks.
 */
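/**
 * A minimal usage sketch of the public API implemented in this file (illustration only;
 * the partition offset below is hypothetical and error handling is reduced to ESP_ERROR_CHECK):
 *
 * @code{c}
 * #include "esp_err.h"
 * #include "esp_mmu_map.h"
 *
 * static void example_map_flash_block(void)
 * {
 *     //Physical address in flash to map: must be aligned to CONFIG_MMU_PAGE_SIZE
 *     const esp_paddr_t paddr = 0x100000;   //hypothetical offset
 *     const size_t size = 0x10000;          //rounded up internally to a multiple of the MMU page size
 *
 *     void *vaddr = NULL;
 *     //Ask for a read-only, byte-accessible data mapping of flash
 *     ESP_ERROR_CHECK(esp_mmu_map(paddr, size, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT, 0, &vaddr));
 *
 *     //... read through `vaddr` here ...
 *
 *     //Convert back, then tear the mapping down
 *     esp_paddr_t paddr_out = 0;
 *     mmu_target_t target = MMU_TARGET_FLASH0;
 *     ESP_ERROR_CHECK(esp_mmu_vaddr_to_paddr(vaddr, &paddr_out, &target));
 *     ESP_ERROR_CHECK(esp_mmu_unmap(vaddr));
 * }
 * @endcode
 */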

/**
 * Struct for a block
 */
typedef struct mem_block_ {
    uint32_t laddr_start;  //linear address start of this block
    uint32_t laddr_end;    //linear address end of this block
    intptr_t vaddr_start;  //virtual address start of this block
    intptr_t vaddr_end;    //virtual address end of this block
    size_t size;           //size of this block, aligned to the MMU page size
    int caps;              //caps of this block, `mmu_mem_caps_t`
    uint32_t paddr_start;  //physical address start of this block
    uint32_t paddr_end;    //physical address end of this block
    mmu_target_t target;   //physical target that this block is mapped to
    TAILQ_ENTRY(mem_block_) entries;  //link entry
} mem_block_t;

/**
 * Struct for a memory region
 */
typedef struct mem_region_ {
    cache_bus_mask_t bus_id;  //cache bus mask of this region
    uint32_t start;           //linear address start of this region
    uint32_t end;             //linear address end of this region
    size_t region_size;       //region size, in bytes
    uint32_t free_head;       //linear address free head of this region
    size_t max_slot_size;     //max slot size within this region
    int caps;                 //caps of this region, `mmu_mem_caps_t`
    mmu_target_t targets;     //physical targets supported by this region
    TAILQ_HEAD(mem_block_head_, mem_block_) mem_block_head;      //link head of allocated blocks within this region
} mem_region_t;

typedef struct {
    /**
     * Number of memory regions that are available after coalescing; this number should be smaller than or equal to `SOC_MMU_LINEAR_ADDRESS_REGION_NUM`
     */
    uint32_t num_regions;
    /**
     * This saves the available MMU linear address regions,
     * after reserving flash .rodata and .text, and after coalescing.
     * Only the first `num_regions` items are valid.
     */
    mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;

static mmu_ctx_t s_mmu_ctx;

#if ENABLE_PADDR_CHECK
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
#endif  //#if ENABLE_PADDR_CHECK

#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
{
    /**
     * We follow the way the 1st bootloader loads the flash .text section:
     *
     * - Currently, IBUS addresses (between `_instruction_reserved_start` and `_instruction_reserved_end`) are consecutive on all chips;
     *   we strongly rely on this to calculate the .text length
     */
    extern int _instruction_reserved_start;
    extern int _instruction_reserved_end;
    size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
    assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);

    irom_len_to_reserve += (uint32_t)&_instruction_reserved_start - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
    irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
    cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve);

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (bus_mask & hw_mem_regions[i].bus_id) {
            if (hw_mem_regions[i].region_size <= irom_len_to_reserve) {
                hw_mem_regions[i].free_head = hw_mem_regions[i].end;
                hw_mem_regions[i].max_slot_size = 0;
                irom_len_to_reserve -= hw_mem_regions[i].region_size;
            } else {
                hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
                hw_mem_regions[i].max_slot_size -= irom_len_to_reserve;
            }
        }
    }
}

static void s_reserve_drom_region(mem_region_t *hw_mem_regions, int region_nums)
{
    /**
     * Similarly, we follow the way the 1st bootloader loads the flash .rodata section:
     */
    extern int _rodata_reserved_start;
    extern int _rodata_reserved_end;
    size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
    assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);

    drom_len_to_reserve += (uint32_t)&_rodata_reserved_start - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
    drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
    cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve);

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (bus_mask & hw_mem_regions[i].bus_id) {
            if (hw_mem_regions[i].region_size <= drom_len_to_reserve) {
                hw_mem_regions[i].free_head = hw_mem_regions[i].end;
                hw_mem_regions[i].max_slot_size = 0;
                drom_len_to_reserve -= hw_mem_regions[i].region_size;
            } else {
                hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
                hw_mem_regions[i].max_slot_size -= drom_len_to_reserve;
            }
        }
    }
}
#endif  //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS

void esp_mmu_map_init(void)
{
    mem_region_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};

    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
        hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
        hw_mem_regions[i].region_size = g_mmu_mem_regions[i].size;
        hw_mem_regions[i].max_slot_size = g_mmu_mem_regions[i].size;
        hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
        hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
        hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
        hw_mem_regions[i].targets = g_mmu_mem_regions[i].targets;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
        assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
        assert(hw_mem_regions[i].region_size % CONFIG_MMU_PAGE_SIZE == 0);
    }

#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
    //First reserve the memory regions used for irom and drom, as we must follow the way the 1st bootloader loads them
    s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
    s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
#endif  //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS

    if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
        //Now we can coalesce adjacent regions
        for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
            mem_region_t *a = &hw_mem_regions[i - 1];
            mem_region_t *b = &hw_mem_regions[i];
            if ((b->free_head == a->end) && (b->caps == a->caps) && (b->targets == a->targets)) {
                a->caps = MEM_REGION_MERGED;
                b->bus_id |= a->bus_id;
                b->start = a->start;
                b->region_size += a->region_size;
                b->free_head = a->free_head;
                b->max_slot_size += a->max_slot_size;
            }
        }
    }

    //Count the memory regions left after coalescing
    uint32_t region_num = 0;
    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (hw_mem_regions[i].caps != MEM_REGION_MERGED) {
            region_num++;
        }
    }
    ESP_EARLY_LOGV(TAG, "after coalescing, %"PRIu32" regions are left", region_num);

    //Initialise `s_mmu_ctx.mem_regions[]` with the available virtual memory regions, now that all static allocations are done
    uint32_t available_region_idx = 0;
    s_mmu_ctx.num_regions = region_num;
    for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
        if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
            continue;
        }

        memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mem_region_t));
        available_region_idx++;
    }

    for (int i = 0; i < available_region_idx; i++) {
        TAILQ_INIT(&s_mmu_ctx.mem_regions[i].mem_block_head);
    }

    assert(available_region_idx == region_num);
}

static esp_err_t s_mem_caps_check(mmu_mem_caps_t caps)
{
    if (caps & MMU_MEM_CAP_EXEC) {
        if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
            //None of the executable memory is expected to be 8-bit accessible or writable.
            return ESP_ERR_INVALID_ARG;
        }
        caps |= MMU_MEM_CAP_32BIT;
    }
    return ESP_OK;
}

esp_err_t esp_mmu_map_get_max_consecutive_free_block_size(mmu_mem_caps_t caps, mmu_target_t target, size_t *out_len)
{
    ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
    *out_len = 0;

    size_t max = 0;

    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        if (((s_mmu_ctx.mem_regions[i].caps & caps) == caps) && ((s_mmu_ctx.mem_regions[i].targets & target) == target)) {
            if (s_mmu_ctx.mem_regions[i].max_slot_size > max) {
                max = s_mmu_ctx.mem_regions[i].max_slot_size;
            }
        }
    }

    *out_len = max;

    return ESP_OK;
}
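/**
 * A minimal sketch of how a caller might size a request against the largest free slot before mapping
 * (illustration only; the caps/target below are examples and `wanted_size` is a hypothetical caller variable):
 *
 * @code{c}
 * size_t max_free = 0;
 * ESP_ERROR_CHECK(esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT, MMU_TARGET_FLASH0, &max_free));
 * if (max_free < wanted_size) {
 *     //Not enough consecutive free vaddr space for `wanted_size`; map a smaller window or fail gracefully
 * }
 * @endcode
 */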

static int32_t s_find_available_region(mem_region_t *mem_regions, uint32_t region_nums, size_t size, mmu_mem_caps_t caps, mmu_target_t target)
{
    int32_t found_region_id = -1;
    for (int i = 0; i < region_nums; i++) {
        if (((mem_regions[i].caps & caps) == caps) && ((mem_regions[i].targets & target) == target)) {
            if (mem_regions[i].max_slot_size >= size) {
                found_region_id = i;
                break;
            }
        }
    }
    return found_region_id;
}

esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, mmu_target_t target, const void **out_ptr)
{
    ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");

    size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
    uint32_t laddr = 0;

    int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
    if (found_region_id == -1) {
        ESP_EARLY_LOGE(TAG, "no such vaddr range");
        return ESP_ERR_NOT_FOUND;
    }

    laddr = (uint32_t)s_mmu_ctx.mem_regions[found_region_id].free_head;
    s_mmu_ctx.mem_regions[found_region_id].free_head += aligned_size;
    s_mmu_ctx.mem_regions[found_region_id].max_slot_size -= aligned_size;
    ESP_EARLY_LOGV(TAG, "found laddr is 0x%x", laddr);

    uint32_t vaddr = 0;
    if (caps & MMU_MEM_CAP_EXEC) {
        vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
    } else {
        vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);
    }
    *out_ptr = (void *)vaddr;

    return ESP_OK;
}

IRAM_ATTR esp_err_t esp_mmu_paddr_find_caps(const esp_paddr_t paddr, mmu_mem_caps_t *out_caps)
{
    mem_region_t *region = NULL;
    mem_block_t *mem_block = NULL;
    bool found = false;
    mem_block_t *found_block = NULL;
    if (out_caps == NULL) {
        return ESP_ERR_INVALID_ARG;
    }

    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        region = &s_mmu_ctx.mem_regions[i];

        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                //we don't care about the dummy_head and the dummy_tail
                continue;
            }

            //now we are only traversing the actual dynamically allocated blocks; dummy_head and dummy_tail are already excluded
            if (mem_block->paddr_start == paddr) {
                found = true;
                found_block = mem_block;
                break;
            }
        }
    }

    if (!found) {
        return ESP_ERR_NOT_FOUND;
    }

    *out_caps = found_block->caps;
    return ESP_OK;
}

static void IRAM_ATTR NOINLINE_ATTR s_do_cache_invalidate(uint32_t vaddr_start, uint32_t size)
{
#if CONFIG_IDF_TARGET_ESP32
    /**
     * On ESP32, due to a hardware limitation, there is no easy way to sync
     * the cache with external memory for a given address range, so we do a
     * full cache sync here
     */
    cache_sync();
#else   //Other chips
    cache_hal_invalidate_addr(vaddr_start, size);
#endif // CONFIG_IDF_TARGET_ESP32
}

static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
    /**
     * Disable the cache. After this call and until the cache is re-enabled, the involved code and data must be placed in internal RAM.
     *
     * @note we call this for now, but this will be refactored to move out of `spi_flash`
     */
    spi_flash_disable_interrupts_caches_and_other_cpu();

    uint32_t actual_mapped_len = 0;
    mmu_hal_map_region(0, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
    mmu_hal_map_region(1, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#endif //  #if !CONFIG_FREERTOS_UNICORE
#endif //  #if (SOC_MMU_PERIPH_NUM == 2)

    cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
    cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_FREERTOS_UNICORE
    bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
    cache_ll_l1_enable_bus(1, bus_mask);
#endif

    s_do_cache_invalidate(vaddr_start, size);

    //Re-enable the cache. After this call, internal RAM access is no longer mandatory
    spi_flash_enable_interrupts_caches_and_other_cpu();

    ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%"PRIx32, actual_mapped_len);
}
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, mmu_mem_caps_t caps, int flags, void **out_ptr)
{
    esp_err_t ret = ESP_FAIL;
    ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
#if !SOC_SPIRAM_SUPPORTED || CONFIG_IDF_TARGET_ESP32
    ESP_RETURN_ON_FALSE(!(target & MMU_TARGET_PSRAM0), ESP_ERR_NOT_SUPPORTED, TAG, "PSRAM is not supported");
#endif
    ESP_RETURN_ON_FALSE((paddr_start % CONFIG_MMU_PAGE_SIZE == 0), ESP_ERR_INVALID_ARG, TAG, "paddr must be aligned to a multiple of CONFIG_MMU_PAGE_SIZE");
    ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");

    size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
    int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
    if (found_region_id == -1) {
        ESP_EARLY_LOGE(TAG, "no such vaddr range");
        return ESP_ERR_NOT_FOUND;
    }

    //Now we're sure we can find an available block inside a certain region
    mem_region_t *found_region = &s_mmu_ctx.mem_regions[found_region_id];
    mem_block_t *dummy_head = NULL;
    mem_block_t *dummy_tail = NULL;
    mem_block_t *new_block = NULL;

    if (TAILQ_EMPTY(&found_region->mem_block_head)) {
        dummy_head = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        ESP_GOTO_ON_FALSE(dummy_head, ESP_ERR_NO_MEM, err, TAG, "no mem");

        dummy_head->laddr_start = found_region->free_head;
        dummy_head->laddr_end = found_region->free_head;
        //We don't care about the vaddr or paddr of the dummy head
        dummy_head->size = 0;
        dummy_head->caps = caps;
        TAILQ_INSERT_HEAD(&found_region->mem_block_head, dummy_head, entries);

        dummy_tail = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        ESP_GOTO_ON_FALSE(dummy_tail, ESP_ERR_NO_MEM, err, TAG, "no mem");

        dummy_tail->laddr_start = found_region->end;
        dummy_tail->laddr_end = found_region->end;
        //We don't care about the vaddr or paddr of the dummy tail
        dummy_tail->size = 0;
        dummy_tail->caps = caps;
        TAILQ_INSERT_TAIL(&found_region->mem_block_head, dummy_tail, entries);
    }

    //Check if the paddr overlaps an already mapped block
    mem_block_t *mem_block = NULL;

#if ENABLE_PADDR_CHECK
    bool is_enclosed = false;
    bool is_overlapped = false;
    bool allow_overlap = flags & ESP_MMU_MMAP_FLAG_PADDR_SHARED;

    TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
        if (target == mem_block->target) {
            if ((s_is_enclosed(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size))) {
                //the to-be-mapped paddr block is already mapped
                is_enclosed = true;
                break;
            }

            if (!allow_overlap && (s_is_overlapped(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size))) {
                is_overlapped = true;
                break;
            }
        }
    }

    if (is_enclosed) {
        ESP_LOGW(TAG, "paddr block is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
        *out_ptr = (void *)mem_block->vaddr_start;
        return ESP_ERR_INVALID_STATE;
    }

    if (!allow_overlap && is_overlapped) {
        ESP_LOGE(TAG, "paddr block overlaps an already mapped paddr block");
        return ESP_ERR_INVALID_ARG;
    }
#endif //#if ENABLE_PADDR_CHECK

    new_block = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    ESP_GOTO_ON_FALSE(new_block, ESP_ERR_NO_MEM, err, TAG, "no mem");

    //Reserve this block as it'll be mapped
    bool found = false;
    //Get the end address of the dummy_head block, which is always the first block in the list
    uint32_t last_end = TAILQ_FIRST(&found_region->mem_block_head)->laddr_end;
    size_t slot_len = 0;
    size_t max_slot_len = 0;
    mem_block_t *found_block = NULL;  //The block we found; the slot between it and its previous block is where the new block will be inserted

    TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
        slot_len = mem_block->laddr_start - last_end;

        if (!found) {
            if (slot_len >= aligned_size) {
                //Found it
                found = true;
                found_block = mem_block;
                slot_len -= aligned_size;
                new_block->laddr_start = last_end;
            }
        }

        max_slot_len = (slot_len > max_slot_len) ? slot_len : max_slot_len;
        last_end = mem_block->laddr_end;
    }

    assert(found);
    //Insert the to-be-mapped new block into the list
    TAILQ_INSERT_BEFORE(found_block, new_block, entries);

    //Finally, update the max_slot_size
    found_region->max_slot_size = max_slot_len;

    //Now fill in the remaining fields based on the found `new_block->laddr_start`
    new_block->laddr_end = new_block->laddr_start + aligned_size;
    new_block->size = aligned_size;
    new_block->caps = caps;
    if (caps & MMU_MEM_CAP_EXEC) {
        new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_INSTRUCTION);
        new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_INSTRUCTION);
    } else {
        new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_DATA);
        new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_DATA);
    }
    new_block->paddr_start = paddr_start;
    new_block->paddr_end = paddr_start + aligned_size;
    new_block->target = target;

    //Do the actual mapping
    s_do_mapping(target, new_block->vaddr_start, paddr_start, aligned_size);
    *out_ptr = (void *)new_block->vaddr_start;

    return ESP_OK;

err:
    if (dummy_tail) {
        heap_caps_free(dummy_tail);
    }
    if (dummy_head) {
        heap_caps_free(dummy_head);
    }

    return ret;
}
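/**
 * A minimal sketch of handling the "paddr already mapped" path above (illustration only; the
 * paddr/size values are hypothetical): when the requested paddr block is enclosed by an existing
 * mapping, `esp_mmu_map()` returns ESP_ERR_INVALID_STATE but still hands back the existing vaddr
 * via `out_ptr`.
 *
 * @code{c}
 * void *vaddr = NULL;
 * esp_err_t err = esp_mmu_map(0x100000, 0x10000, MMU_TARGET_FLASH0,
 *                             MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT, 0, &vaddr);
 * if (err == ESP_ERR_INVALID_STATE) {
 *     //The block was mapped earlier; `vaddr` points into that existing mapping
 * } else if (err != ESP_OK) {
 *     //Handle real failures (ESP_ERR_NOT_FOUND, ESP_ERR_NO_MEM, ...)
 * }
 * //Pass ESP_MMU_MMAP_FLAG_PADDR_SHARED in `flags` to allow partially overlapping paddr mappings instead
 * @endcode
 */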

static void IRAM_ATTR NOINLINE_ATTR s_do_unmapping(uint32_t vaddr_start, uint32_t size)
{
    /**
     * Disable the cache. After this call and until the cache is re-enabled, the involved code and data must be placed in internal RAM.
     *
     * @note we call this for now, but this will be refactored to move out of `spi_flash`
     */
    spi_flash_disable_interrupts_caches_and_other_cpu();

    mmu_hal_unmap_region(0, vaddr_start, size);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
    mmu_hal_unmap_region(1, vaddr_start, size);
#endif //  #if !CONFIG_FREERTOS_UNICORE
#endif //  #if (SOC_MMU_PERIPH_NUM == 2)

    //Re-enable the cache. After this call, internal RAM access is no longer mandatory
    spi_flash_enable_interrupts_caches_and_other_cpu();
}
esp_err_t esp_mmu_unmap(void *ptr)
{
    ESP_RETURN_ON_FALSE(ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    mem_region_t *region = NULL;
    mem_block_t *mem_block = NULL;
    uint32_t ptr_laddr = mmu_ll_vaddr_to_laddr((uint32_t)ptr);
    size_t slot_len = 0;

    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        if (ptr_laddr >= s_mmu_ctx.mem_regions[i].free_head && ptr_laddr < s_mmu_ctx.mem_regions[i].end) {
            region = &s_mmu_ctx.mem_regions[i];
        }
    }
    ESP_RETURN_ON_FALSE(region, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer is outside external memory regions");

    bool found = false;
    mem_block_t *found_block = NULL;
    TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
        if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
            //we don't care about the dummy_head and the dummy_tail
            continue;
        }

        //now we are only traversing the actual dynamically allocated blocks; dummy_head and dummy_tail are already excluded
        if (mem_block->laddr_start == ptr_laddr) {
            slot_len = TAILQ_NEXT(mem_block, entries)->laddr_start - TAILQ_PREV(mem_block, mem_block_head_, entries)->laddr_end;
            region->max_slot_size = (slot_len > region->max_slot_size) ? slot_len : region->max_slot_size;

            found = true;
            found_block = mem_block;
            break;
        }
    }

    ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer isn't mapped yet");

    //Do the actual unmapping
    s_do_unmapping(mem_block->vaddr_start, mem_block->size);
    //Remove the already unmapped block from the list
    TAILQ_REMOVE(&region->mem_block_head, found_block, entries);
    heap_caps_free(found_block);

    return ESP_OK;
}

esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream)
{
    char line[100];
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        fprintf(stream, "region %d:\n", i);
        fprintf(stream, "%-15s %-14s %-14s %-12s %-12s %-12s\n", "Bus ID", "Start", "Free Head", "End", "Caps", "Max Slot Size");

        char *buf = line;
        size_t len = sizeof(line);
        memset(line, 0x0, len);
        snprintf(buf, len, "0x%-13x 0x%-12"PRIx32" 0x%-11"PRIx32"  0x%-10"PRIx32" 0x%-10x 0x%-8x\n",
                 s_mmu_ctx.mem_regions[i].bus_id,
                 s_mmu_ctx.mem_regions[i].start,
                 s_mmu_ctx.mem_regions[i].free_head,
                 s_mmu_ctx.mem_regions[i].end,
                 s_mmu_ctx.mem_regions[i].caps,
                 s_mmu_ctx.mem_regions[i].max_slot_size);
        fputs(line, stream);

        fprintf(stream, "mapped blocks:\n");
        fprintf(stream, "%-4s %-13s %-12s %-12s %-6s %-13s %-11s\n", "ID", "Vaddr Start", "Vaddr End", "Block Size", "Caps", "Paddr Start", "Paddr End");
        mem_region_t *region = &s_mmu_ctx.mem_regions[i];
        mem_block_t *mem_block = NULL;
        int id = 0;
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                snprintf(buf, len, "%-4d 0x%-11x 0x%-10x 0x%-10x 0x%-4x 0x%-11"PRIx32" 0x%-8"PRIx32"\n",
                         id,
                         (uint32_t) mem_block->vaddr_start,
                         (uint32_t) mem_block->vaddr_end,
                         mem_block->size,
                         mem_block->caps,
                         mem_block->paddr_start,
                         mem_block->paddr_end);
                fputs(line, stream);
                id++;
            }
        }
        fprintf(stream, "\n");
    }

    return ESP_OK;
}
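/**
 * A minimal usage sketch of the dump helper above (illustration only): any writable stream works,
 * e.g. dumping the current mappings to the console via stdout.
 *
 * @code{c}
 * ESP_ERROR_CHECK(esp_mmu_map_dump_mapped_blocks(stdout));
 * @endcode
 */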

/*---------------------------------------------------------------
        Private dump functions, IRAM Safe
---------------------------------------------------------------*/
esp_err_t IRAM_ATTR esp_mmu_map_dump_mapped_blocks_private(void)
{
    for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
        mem_region_t *region = &s_mmu_ctx.mem_regions[i];
        mem_block_t *mem_block = NULL;
        TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
            if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
                ESP_DRAM_LOGI(TAG, "block vaddr_start: 0x%x", mem_block->vaddr_start);
                ESP_DRAM_LOGI(TAG, "block vaddr_end: 0x%x", mem_block->vaddr_end);
                ESP_DRAM_LOGI(TAG, "block size: 0x%x", mem_block->size);
                ESP_DRAM_LOGI(TAG, "block caps: 0x%x\n", mem_block->caps);
                ESP_DRAM_LOGI(TAG, "block paddr_start: 0x%x\n", mem_block->paddr_start);
                ESP_DRAM_LOGI(TAG, "block paddr_end: 0x%x\n", mem_block->paddr_end);
            }
        }
        ESP_DRAM_LOGI(TAG, "region bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
        ESP_DRAM_LOGI(TAG, "region start: 0x%x", s_mmu_ctx.mem_regions[i].start);
        ESP_DRAM_LOGI(TAG, "region end: 0x%x", s_mmu_ctx.mem_regions[i].end);
        ESP_DRAM_LOGI(TAG, "region caps: 0x%x\n", s_mmu_ctx.mem_regions[i].caps);
    }

    return ESP_OK;
}

/*---------------------------------------------------------------
    Helper APIs for conversion between vaddr and paddr
---------------------------------------------------------------*/
static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
    //we call this for now, but this will be refactored to move out of `spi_flash`
    spi_flash_disable_interrupts_caches_and_other_cpu();
    //On ESP32, core 1 settings should be the same as core 0's
    bool is_mapped = mmu_hal_vaddr_to_paddr(0, vaddr, out_paddr, out_target);
    spi_flash_enable_interrupts_caches_and_other_cpu();

    return is_mapped;
}

esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
    ESP_RETURN_ON_FALSE(vaddr && out_paddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
    ESP_RETURN_ON_FALSE(mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");

    esp_paddr_t paddr = 0;
    mmu_target_t target = 0;

    bool is_mapped = s_vaddr_to_paddr((uint32_t)vaddr, &paddr, &target);
    ESP_RETURN_ON_FALSE(is_mapped, ESP_ERR_NOT_FOUND, TAG, "vaddr isn't mapped");

    *out_paddr = paddr;
    *out_target = target;

    return ESP_OK;
}

static bool NOINLINE_ATTR IRAM_ATTR s_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
{
    //we call this for now, but this will be refactored to move out of `spi_flash`
    spi_flash_disable_interrupts_caches_and_other_cpu();
    //On ESP32, core 1 settings should be the same as core 0's
    bool found = mmu_hal_paddr_to_vaddr(0, paddr, target, type, out_vaddr);
    spi_flash_enable_interrupts_caches_and_other_cpu();

    return found;
}

esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, void **out_vaddr)
{
    ESP_RETURN_ON_FALSE(out_vaddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");

    uint32_t vaddr = 0;
    bool found = false;

    found = s_paddr_to_vaddr(paddr, target, type, &vaddr);
    ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "paddr isn't mapped");

    *out_vaddr = (void *)vaddr;

    return ESP_OK;
}
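/**
 * A minimal round-trip sketch for the two conversion helpers above (illustration only;
 * `mapped_vaddr` stands for any pointer previously returned by esp_mmu_map()):
 *
 * @code{c}
 * esp_paddr_t paddr = 0;
 * mmu_target_t target = MMU_TARGET_FLASH0;
 * ESP_ERROR_CHECK(esp_mmu_vaddr_to_paddr(mapped_vaddr, &paddr, &target));
 *
 * void *vaddr = NULL;
 * ESP_ERROR_CHECK(esp_mmu_paddr_to_vaddr(paddr, target, MMU_VADDR_DATA, &vaddr));
 * //For a data mapping, `vaddr` is expected to point back into the same mapped range
 * @endcode
 */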

#if ENABLE_PADDR_CHECK
/*---------------------------------------------------------------
    Helper functions to check block
---------------------------------------------------------------*/
/**
 * Check if a new block is enclosed by another one, e.g.
 *
 * This is enclosed:
 *
 *       new_block_start               new_block_end
 *              |-------- New Block --------|
 *      |--------------- Block ---------------|
 * block_start                              block_end
 *
 * @note Note the difference from `s_is_overlapped()` below
 *
 * @param block_start     An original block start
 * @param block_end       An original block end
 * @param new_block_start New block start
 * @param new_block_size  New block size
 *
 * @return True: new block is enclosed; False: new block is not enclosed
 */
static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
    bool is_enclosed = false;
    uint32_t new_block_end = new_block_start + new_block_size;

    if ((new_block_start >= block_start) && (new_block_end <= block_end)) {
        is_enclosed = true;
    } else {
        is_enclosed = false;
    }

    return is_enclosed;
}

/**
 * Check if a new block overlaps another one, e.g.
 *
 * This is overlapped:
 *
 *       new_block_start                 new_block_end
 *              |---------- New Block ----------|
 *      |--------------- Block ---------------|
 * block_start                              block_end
 *
 * @note Note the difference from `s_is_enclosed()` above
 *
 * @param block_start     An original block start
 * @param block_end       An original block end
 * @param new_block_start New block start
 * @param new_block_size  New block size
 *
 * @return True: new block is overlapped; False: new block is not overlapped
 */
static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
{
    bool is_overlapped = false;
    uint32_t new_block_end = new_block_start + new_block_size;

    if (((new_block_start < block_start) && (new_block_end > block_start)) ||
        ((new_block_start < block_end) && (new_block_end > block_end))) {
        is_overlapped = true;
    } else {
        is_overlapped = false;
    }

    return is_overlapped;
}
#endif  //#if ENABLE_PADDR_CHECK