/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "hal/mmu_ll.h"
#include "hal/mmu_hal.h"
#include "hal/cache_hal.h"
#include "soc/mmu.h"

#include "esp_private/esp_mmu_map_private.h"
#include "esp_mmu_map.h"
#include "esp_rom_spiflash.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#include "esp_private/mmu_psram_flash.h"
#endif

#if CONFIG_IDF_TARGET_ESP32
#include "esp_private/esp_cache_esp32_private.h"
#endif

#include "esp_private/cache_utils.h"
#include "spi_flash_mmap.h"
#include "esp_heap_caps.h"

#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
#endif

#if CONFIG_SPIRAM_RODATA
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
#endif

#if !CONFIG_SPI_FLASH_ROM_IMPL


typedef struct mmap_block_t {
    uint32_t *vaddr_list;
    int list_num;
} mmap_block_t;


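/**
 * Map a region of flash into data or instruction address space.
 *
 * A minimal usage sketch (offsets are illustrative; `src_addr` and `size`
 * follow the alignment requirements documented in `spi_flash_mmap.h`):
 *
 *     const void *map_ptr;
 *     spi_flash_mmap_handle_t map_handle;
 *     // map 64 KB of flash at offset 0x10000 into the data address space
 *     if (spi_flash_mmap(0x10000, 0x10000, SPI_FLASH_MMAP_DATA, &map_ptr, &map_handle) == ESP_OK) {
 *         // ... read through map_ptr ...
 *         spi_flash_munmap(map_handle);
 *     }
 */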
esp_err_t spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret = ESP_FAIL;
    mmu_mem_caps_t caps = 0;
    void *ptr = NULL;
    mmap_block_t *block = NULL;
    uint32_t *vaddr_list = NULL;

    block = heap_caps_calloc(1, sizeof(mmap_block_t), MALLOC_CAP_INTERNAL);
    if (!block) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }

    vaddr_list = heap_caps_calloc(1, 1 * sizeof(uint32_t), MALLOC_CAP_INTERNAL);
    if (!vaddr_list) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }

    block->vaddr_list = vaddr_list;

    if (memory == SPI_FLASH_MMAP_INST) {
        caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT;
    } else {
        caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
    }
    ret = esp_mmu_map(src_addr, size, MMU_TARGET_FLASH0, caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr);
    if (ret == ESP_OK) {
        vaddr_list[0] = (uint32_t)ptr;
        block->list_num = 1;
    } else if (ret == ESP_ERR_INVALID_STATE) {
        /**
         * The paddr region is already mapped. To keep the original `flash_mmap.c`
         * behaviour, treat this as success. Set `list_num` to 0 so `esp_mmu_unmap`
         * is never called for this block, as `esp_mmu_map` did not actually
         * create a new mapping.
         */
        block->list_num = 0;
    } else {
        goto err;
    }

    *out_ptr = ptr;
    *out_handle = (uint32_t)block;

    return ESP_OK;

err:
    if (vaddr_list) {
        k_free(vaddr_list);
    }
    if (block) {
        k_free(block);
    }
    return ret;
}


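/**
 * Count how many separate contiguous runs the `pages` list splits into;
 * each run needs its own mapping.
 */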
static int s_find_non_contiguous_block_nums(const int *pages, int page_count)
{
    int nums = 1;
    int last_end = pages[0] + 1;

    for (int i = 1; i < page_count; i++) {
        if (pages[i] != last_end) {
            nums++;
        }
        last_end = pages[i] + 1;
    }
    return nums;
}

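/**
 * Collapse `pages` into one [start_page, page_count] pair per contiguous run;
 * `block_nums` must come from s_find_non_contiguous_block_nums().
 */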
static void s_merge_contiguous_pages(const int *pages, uint32_t page_count, int block_nums, int (*out_blocks)[2])
{
    uint32_t last_end = pages[0] + 1;
    int new_array_id = 0;
    out_blocks[new_array_id][0] = pages[0];
    out_blocks[new_array_id][1] = 1;

    for (int i = 1; i < page_count; i++) {
        if (pages[i] != last_end) {
            new_array_id += 1;
            assert(new_array_id < block_nums);
            out_blocks[new_array_id][0] = pages[i];
            out_blocks[new_array_id][1] = 1;
        } else {
            out_blocks[new_array_id][1] += 1;
        }

        last_end = pages[i] + 1;
    }
}

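/* Convert [start_page, page_count] pairs in place into [paddr, size] in bytes. */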
static void s_pages_to_bytes(int (*blocks)[2], int block_nums)
{
    for (int i = 0; i < block_nums; i++) {
        blocks[i][0] = blocks[i][0] * CONFIG_MMU_PAGE_SIZE;
        blocks[i][1] = blocks[i][1] * CONFIG_MMU_PAGE_SIZE;
    }
}

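/**
 * Map a list of physical flash pages into a contiguous virtual address range.
 * The pages are first merged into contiguous physical blocks, then each block
 * is mapped with its own esp_mmu_map() call, all tracked by a single handle.
 */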
esp_err_t spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
    esp_err_t ret = ESP_FAIL;
    mmu_mem_caps_t caps = 0;
    mmap_block_t *block = NULL;
    uint32_t *vaddr_list = NULL;
    int successful_cnt = 0;

    const int block_num = s_find_non_contiguous_block_nums(pages, page_count);
    int (*paddr_blocks)[2] = k_malloc(block_num * 2 * sizeof(int));

    if (paddr_blocks == NULL) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }

    s_merge_contiguous_pages(pages, page_count, block_num, paddr_blocks);
    s_pages_to_bytes(paddr_blocks, block_num);

    block = heap_caps_calloc(1, sizeof(mmap_block_t), MALLOC_CAP_INTERNAL);
    if (!block) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }

    vaddr_list = heap_caps_calloc(1, block_num * sizeof(uint32_t), MALLOC_CAP_INTERNAL);
    if (!vaddr_list) {
        ret = ESP_ERR_NO_MEM;
        goto err;
    }

    if (memory == SPI_FLASH_MMAP_INST) {
        caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT;
    } else {
        caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
    }
    for (int i = 0; i < block_num; i++) {
        void *ptr = NULL;
        ret = esp_mmu_map(paddr_blocks[i][0], paddr_blocks[i][1], MMU_TARGET_FLASH0, caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr);
        if (ret == ESP_OK) {
            vaddr_list[i] = (uint32_t)ptr;
            successful_cnt++;
        } else {
            /**
             * A note for `ret == ESP_ERR_INVALID_STATE`:
             * if one of the `pages` is already mapped, no consecutive vaddr
             * block can be found for the whole list, so fail the call.
             */
            goto err;
        }
    }

    block->vaddr_list = vaddr_list;
    block->list_num = successful_cnt;

    /**
     * We get a contiguous vaddr block, but it may consist of multiple esp_mmu handles.
     * The first handle's vaddr is the start address of this contiguous vaddr block.
     */
    *out_ptr = (void *)vaddr_list[0];
    *out_handle = (uint32_t)block;

    k_free(paddr_blocks);
    return ESP_OK;

err:
    for (int i = 0; i < successful_cnt; i++) {
        esp_mmu_unmap((void *)vaddr_list[i]);
    }
    if (vaddr_list) {
        k_free(vaddr_list);
    }
    if (block) {
        k_free(block);
    }
    if (paddr_blocks) {
        k_free(paddr_blocks);
    }
    return ret;
}


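/**
 * Unmap every mapping tracked by `handle` (as created by spi_flash_mmap() or
 * spi_flash_mmap_pages()) and free the handle itself.
 */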
void spi_flash_munmap(spi_flash_mmap_handle_t handle)
{
    esp_err_t ret = ESP_FAIL;
    mmap_block_t *block = (void *)handle;

    for (int i = 0; i < block->list_num; i++) {
        ret = esp_mmu_unmap((void *)block->vaddr_list[i]);
        if (ret == ESP_ERR_NOT_FOUND) {
            assert(0 && "invalid handle, or handle already unmapped");
        }
    }

    k_free(block->vaddr_list);
    k_free(block);
}


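/* Dump all currently mapped blocks to stdout. */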
void spi_flash_mmap_dump(void)
{
    esp_mmu_map_dump_mapped_blocks(stdout);
}


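/**
 * Return the number of free MMU pages in the largest consecutive block of
 * virtual address space available for the given memory type.
 */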
uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
{
    mmu_mem_caps_t caps = 0;
    if (memory == SPI_FLASH_MMAP_INST) {
        caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT;
    } else {
        caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
    }

    size_t len = 0;
    esp_mmu_map_get_max_consecutive_free_block_size(caps, MMU_TARGET_FLASH0, &len);
    return len / CONFIG_MMU_PAGE_SIZE;
}

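/**
 * Translate a physical flash offset into the virtual address it is mapped at,
 * or NULL if it is not mapped. When .text or .rodata has been moved to PSRAM,
 * the lookup is redirected to the corresponding PSRAM physical address.
 */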
const void * spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memory_t memory)
{
    esp_err_t ret = ESP_FAIL;
    void *ptr = NULL;
    mmu_target_t target = MMU_TARGET_FLASH0;

    __attribute__((unused)) uint32_t phys_page = phys_offs / CONFIG_MMU_PAGE_SIZE;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
    if (phys_page >= instruction_flash_start_page_get() && phys_page <= instruction_flash_end_page_get()) {
        target = MMU_TARGET_PSRAM0;
        phys_offs -= instruction_flash2spiram_offset() * CONFIG_MMU_PAGE_SIZE;
    }
#endif

#if CONFIG_SPIRAM_RODATA
    if (phys_page >= rodata_flash_start_page_get() && phys_page <= rodata_flash_end_page_get()) {
        target = MMU_TARGET_PSRAM0;
        phys_offs -= rodata_flash2spiram_offset() * CONFIG_MMU_PAGE_SIZE;
    }
#endif

    mmu_vaddr_t type = (memory == SPI_FLASH_MMAP_DATA) ? MMU_VADDR_DATA : MMU_VADDR_INSTRUCTION;
    ret = esp_mmu_paddr_to_vaddr(phys_offs, target, type, &ptr);
    if (ret == ESP_ERR_NOT_FOUND) {
        return NULL;
    }
    assert(ret == ESP_OK);
    return (const void *)ptr;
}

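/**
 * Check whether `phys_addr` is currently mapped through the cache. On targets
 * other than ESP32, also report the matching virtual address via `out_ptr`.
 */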
static bool IRAM_ATTR is_page_mapped_in_cache(uint32_t phys_addr, const void **out_ptr)
{
    *out_ptr = NULL;
    mmu_mem_caps_t caps = 0;

    esp_err_t err = esp_mmu_paddr_find_caps(phys_addr, &caps);
    if (err == ESP_OK) {
        // On ESP32 we always flush the whole cache, so return true without reporting the vaddr
#if !CONFIG_IDF_TARGET_ESP32
        uint32_t vaddr = 0;
        if (caps & MMU_MEM_CAP_EXEC) {
            mmu_hal_paddr_to_vaddr(0, phys_addr, MMU_TARGET_FLASH0, MMU_VADDR_INSTRUCTION, &vaddr);
        } else {
            mmu_hal_paddr_to_vaddr(0, phys_addr, MMU_TARGET_FLASH0, MMU_VADDR_DATA, &vaddr);
        }
        *out_ptr = (void *)vaddr;
#endif
        return true;
    }
    return false;
}

/* Check whether the given flash address range has a cache mapping; if so, flush the affected cache */
IRAM_ATTR bool spi_flash_check_and_flush_cache(size_t start_addr, size_t length)
{
    bool ret = false;
    /* align start_addr & length to full MMU pages */
    uint32_t page_start_addr = start_addr & ~(SPI_FLASH_MMU_PAGE_SIZE - 1);
    length += (start_addr - page_start_addr);
    length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE - 1);
    for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
        if (addr >= g_rom_flashchip.chip_size) {
            return false; /* invalid address */
        }

        const void *vaddr = NULL;
        if (is_page_mapped_in_cache(addr, &vaddr)) {
#if CONFIG_IDF_TARGET_ESP32
            cache_sync();
            return true;
#else // CONFIG_IDF_TARGET_ESP32
            if (vaddr != NULL) {
                cache_hal_invalidate_addr((uint32_t)vaddr, SPI_FLASH_MMU_PAGE_SIZE);
                ret = true;
            }
#endif // CONFIG_IDF_TARGET_ESP32
        }
    }
    return ret;
}
#endif //!CONFIG_SPI_FLASH_ROM_IMPL

#if !CONFIG_SPI_FLASH_ROM_IMPL || CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
// The ROM implementation returns the PSRAM physical address when .text or .rodata is placed in PSRAM.
// Always patch it when SPIRAM_FETCH_INSTRUCTIONS or SPIRAM_RODATA is set.
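/**
 * Translate a cached (virtual) address back to a physical flash offset, or
 * SPI_FLASH_CACHE2PHYS_FAIL if the address is not mapped. Addresses inside
 * .text/.rodata that were moved to PSRAM are patched back to flash offsets.
 */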
size_t spi_flash_cache2phys(const void *cached)
{
    if (cached == NULL) {
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }

    esp_err_t ret = ESP_FAIL;
    uint32_t paddr = 0;
    mmu_target_t target = 0;

    ret = esp_mmu_vaddr_to_paddr((void *)cached, &paddr, &target);
    if (ret != ESP_OK) {
        return SPI_FLASH_CACHE2PHYS_FAIL;
    }

    int offset = 0;
#if CONFIG_SPIRAM_RODATA
    if ((uint32_t)cached >= (uint32_t)&_rodata_reserved_start && (uint32_t)cached <= (uint32_t)&_rodata_reserved_end) {
        offset = rodata_flash2spiram_offset();
    }
#endif
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
    if ((uint32_t)cached >= (uint32_t)&_instruction_reserved_start && (uint32_t)cached <= (uint32_t)&_instruction_reserved_end) {
        offset = instruction_flash2spiram_offset();
    }
#endif

    return paddr + offset * CONFIG_MMU_PAGE_SIZE;
}
#endif