1 /*
2 * Copyright (c) 2024 Espressif Systems (Shanghai) Co., Ltd.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <soc.h>
8 #include <hal/mmu_hal.h>
9 #include <hal/mmu_types.h>
10 #include <hal/cache_types.h>
11 #include <hal/cache_ll.h>
12 #include <hal/cache_hal.h>
13 #include <rom/cache.h>
14 #include <esp_rom_sys.h>
15 #include <esp_err.h>
16
17 #include <esp_app_format.h>
18 #include <zephyr/storage/flash_map.h>
19 #include <esp_rom_uart.h>
20 #include <esp_flash.h>
21 #include <esp_log.h>
22 #include <bootloader_clock.h>
23 #include <bootloader_common.h>
24 #include <esp_cpu.h>
25
26 #include <zephyr/linker/linker-defs.h>
27 #include <kernel_internal.h>
28
29 #if CONFIG_SOC_SERIES_ESP32C6
30 #include <soc/hp_apm_reg.h>
31 #include <soc/lp_apm_reg.h>
32 #include <soc/lp_apm0_reg.h>
33 #include <soc/pcr_reg.h>
34 #endif /* CONFIG_SOC_SERIES_ESP32C6 */
35
36 #include <esp_flash_internal.h>
37 #include <bootloader_flash.h>
38 #include <bootloader_flash_priv.h>
39 #include <hal/efuse_ll.h>
40 #include <hal/efuse_hal.h>
41 #include <hal/wdt_hal.h>
42 #include <soc/chip_revision.h>
43 #include <soc/rtc.h>
44 #ifndef CONFIG_SOC_SERIES_ESP32
45 #include <soc/assist_debug_reg.h>
46 #include <soc/system_reg.h>
47 #endif
48
49 #include "hw_init.h"
50 #include "soc_init.h"
51 #include "soc_random.h"
52
53 #if defined(CONFIG_SOC_ESP32S3_APPCPU) || defined(CONFIG_SOC_ESP32_APPCPU)
54 #error "APPCPU does not need this file!"
55 #endif
56
57 #define TAG "boot"
58
59 #define CHECKSUM_ALIGN 16
60 #define IS_PADD(addr) (addr == 0)
61 #define IS_DRAM(addr) (addr >= SOC_DRAM_LOW && addr < SOC_DRAM_HIGH)
62 #define IS_IRAM(addr) (addr >= SOC_IRAM_LOW && addr < SOC_IRAM_HIGH)
63 #define IS_IROM(addr) (addr >= SOC_IROM_LOW && addr < SOC_IROM_HIGH)
64 #define IS_DROM(addr) (addr >= SOC_DROM_LOW && addr < SOC_DROM_HIGH)
65 #ifdef SOC_RTC_MEM_SUPPORTED
66 #define IS_RTC(addr) (addr >= SOC_RTC_DRAM_LOW && addr < SOC_RTC_DRAM_HIGH)
67 #else
68 #define IS_RTC(addr) 0
69 #endif
70 #define IS_SRAM(addr) (IS_IRAM(addr) || IS_DRAM(addr))
71 #define IS_MMAP(addr) (IS_IROM(addr) || IS_DROM(addr))
72 #define IS_NONE(addr) (!IS_IROM(addr) && !IS_DROM(addr) \
73 && !IS_IRAM(addr) && !IS_DRAM(addr) && !IS_PADD(addr) && !IS_RTC(addr))
74
75 #define HDR_ATTR __attribute__((section(".entry_addr"))) __attribute__((used))
76
77 #if !defined(CONFIG_SOC_ESP32_APPCPU) && !defined(CONFIG_SOC_ESP32S3_APPCPU)
78 #define PART_OFFSET FIXED_PARTITION_OFFSET(slot0_partition)
79 #else
80 #define PART_OFFSET FIXED_PARTITION_OFFSET(slot0_appcpu_partition)
81 #endif
82
void __start(void);
/* Image entry point, placed in the dedicated ".entry_addr" section so the
 * preceding boot stage (ROM loader / MCUboot) can locate and jump to it.
 */
static HDR_ATTR void (*_entry_point)(void) = &__start;

/* Copy of the ESP application image header; word-aligned as required for
 * flash reads. NOTE(review): presumably populated during hardware_init()
 * before map_rom_segments() consults segment_count — confirm.
 */
esp_image_header_t WORD_ALIGNED_ATTR bootloader_image_hdr;
/* Linker-script symbols describing the flash-mapped (XIP) code and rodata
 * segments; their *addresses* encode the values (see casts below).
 */
extern uint32_t _image_irom_start, _image_irom_size, _image_irom_vaddr;
extern uint32_t _image_drom_start, _image_drom_size, _image_drom_vaddr;
89
90 #ifndef CONFIG_MCUBOOT
91
/* The linker script exports the libc heap size as an address symbol; taking
 * the symbol's address recovers the numeric value.
 */
extern uint32_t _libc_heap_size;
static uint32_t libc_heap_size = (uint32_t)&_libc_heap_size;

/* Flash-mapped (execute-in-place) segments of the application image.
 * Virtual map addresses and sizes come from linker symbols; flash offsets
 * are relative to the application slot (PART_OFFSET) and may be corrected
 * later by map_rom_segments() to account for post-build image processing.
 */
static struct rom_segments map = {
	.irom_map_addr = (uint32_t)&_image_irom_vaddr,
	.irom_flash_offset = PART_OFFSET + (uint32_t)&_image_irom_start,
	.irom_size = (uint32_t)&_image_irom_size,
	.drom_map_addr = ((uint32_t)&_image_drom_vaddr),
	.drom_flash_offset = PART_OFFSET + (uint32_t)&_image_drom_start,
	.drom_size = (uint32_t)&_image_drom_size,
};
103
map_rom_segments(int core,struct rom_segments * map)104 void map_rom_segments(int core, struct rom_segments *map)
105 {
106 uint32_t app_irom_vaddr_align = map->irom_map_addr & MMU_FLASH_MASK;
107 uint32_t app_irom_start_align = map->irom_flash_offset & MMU_FLASH_MASK;
108
109 uint32_t app_drom_vaddr_align = map->drom_map_addr & MMU_FLASH_MASK;
110 uint32_t app_drom_start_align = map->drom_flash_offset & MMU_FLASH_MASK;
111
112 /* Traverse segments to fix flash offset changes due to post-build processing */
113 #ifndef CONFIG_BOOTLOADER_MCUBOOT
114 esp_image_segment_header_t WORD_ALIGNED_ATTR segment_hdr;
115 size_t offset = FIXED_PARTITION_OFFSET(boot_partition);
116 bool checksum = false;
117 unsigned int segments = 0;
118 unsigned int ram_segments = 0;
119
120 offset += sizeof(esp_image_header_t);
121
122 while (segments++ < 16) {
123
124 if (esp_rom_flash_read(offset, &segment_hdr,
125 sizeof(esp_image_segment_header_t), true) != 0) {
126 ESP_EARLY_LOGE(TAG, "Failed to read segment header at %x", offset);
127 abort();
128 }
129
130 /* TODO: Find better end-of-segment detection */
131 if (IS_NONE(segment_hdr.load_addr)) {
132 /* Total segment count = (segments - 1) */
133 break;
134 }
135
136 ESP_EARLY_LOGI(TAG, "%s: lma 0x%08x vma 0x%08x len 0x%-6x (%u)",
137 IS_NONE(segment_hdr.load_addr) ? "???" :
138 IS_MMAP(segment_hdr.load_addr) ?
139 IS_IROM(segment_hdr.load_addr) ? "IMAP" : "DMAP" :
140 IS_DRAM(segment_hdr.load_addr) ? "DRAM" :
141 IS_RTC(segment_hdr.load_addr) ? "RTC" : "IRAM",
142 offset + sizeof(esp_image_segment_header_t),
143 segment_hdr.load_addr, segment_hdr.data_len, segment_hdr.data_len);
144
145 /* Fix drom and irom produced be the linker, as it could
146 * be invalidated by the elf2image and flash load offset
147 */
148 if (segment_hdr.load_addr == map->drom_map_addr) {
149 map->drom_flash_offset = offset + sizeof(esp_image_segment_header_t);
150 app_drom_start_align = map->drom_flash_offset & MMU_FLASH_MASK;
151 }
152 if (segment_hdr.load_addr == map->irom_map_addr) {
153 map->irom_flash_offset = offset + sizeof(esp_image_segment_header_t);
154 app_irom_start_align = map->irom_flash_offset & MMU_FLASH_MASK;
155 }
156 if (IS_SRAM(segment_hdr.load_addr) || IS_RTC(segment_hdr.load_addr)) {
157 ram_segments++;
158 }
159
160 offset += sizeof(esp_image_segment_header_t) + segment_hdr.data_len;
161
162 if (ram_segments == bootloader_image_hdr.segment_count && !checksum) {
163 offset += (CHECKSUM_ALIGN - 1) - (offset % CHECKSUM_ALIGN) + 1;
164 checksum = true;
165 }
166 }
167 if (segments == 0 || segments == 16) {
168 ESP_EARLY_LOGE(TAG, "Error parsing segments");
169 abort();
170 }
171
172 ESP_EARLY_LOGI(TAG, "Image with %d segments", segments - 1);
173 #endif /* !CONFIG_BOOTLOADER_MCUBOOT */
174
175 #if CONFIG_SOC_SERIES_ESP32
176 Cache_Read_Disable(core);
177 Cache_Flush(core);
178 #else
179 cache_hal_disable(CACHE_TYPE_ALL);
180 #endif /* CONFIG_SOC_SERIES_ESP32 */
181
182 /* Clear the MMU entries that are already set up,
183 * so the new app only has the mappings it creates.
184 */
185 if (core == 0) {
186 mmu_hal_unmap_all();
187 }
188
189 #if CONFIG_SOC_SERIES_ESP32
190 int rc = 0;
191 uint32_t drom_page_count =
192 (map->drom_size + CONFIG_MMU_PAGE_SIZE - 1) / CONFIG_MMU_PAGE_SIZE;
193
194 rc |= cache_flash_mmu_set(core, 0, app_drom_vaddr_align, app_drom_start_align, 64,
195 drom_page_count);
196
197 uint32_t irom_page_count =
198 (map->irom_size + CONFIG_MMU_PAGE_SIZE - 1) / CONFIG_MMU_PAGE_SIZE;
199
200 rc |= cache_flash_mmu_set(core, 0, app_irom_vaddr_align, app_irom_start_align, 64,
201 irom_page_count);
202 if (rc != 0) {
203 ESP_EARLY_LOGE(TAG, "Failed to setup flash cache (e=0x%X). Aborting!", rc);
204 abort();
205 }
206 #else
207 uint32_t actual_mapped_len = 0;
208
209 mmu_hal_map_region(core, MMU_TARGET_FLASH0, app_drom_vaddr_align, app_drom_start_align,
210 map->drom_size, &actual_mapped_len);
211
212 mmu_hal_map_region(core, MMU_TARGET_FLASH0, app_irom_vaddr_align, app_irom_start_align,
213 map->irom_size, &actual_mapped_len);
214 #endif /* CONFIG_SOC_SERIES_ESP32 */
215
216 /* ----------------------Enable corresponding buses---------------- */
217 cache_bus_mask_t bus_mask;
218
219 bus_mask = cache_ll_l1_get_bus(core, app_drom_vaddr_align, map->drom_size);
220 cache_ll_l1_enable_bus(core, bus_mask);
221 bus_mask = cache_ll_l1_get_bus(core, app_irom_vaddr_align, map->irom_size);
222 cache_ll_l1_enable_bus(core, bus_mask);
223
224 #if CONFIG_MP_MAX_NUM_CPUS > 1
225 bus_mask = cache_ll_l1_get_bus(1, app_drom_vaddr_align, map->drom_size);
226 cache_ll_l1_enable_bus(1, bus_mask);
227 bus_mask = cache_ll_l1_get_bus(1, app_irom_vaddr_align, map->irom_size);
228 cache_ll_l1_enable_bus(1, bus_mask);
229 #endif
230
231 /* ----------------------Enable Cache---------------- */
232 #if CONFIG_SOC_SERIES_ESP32
233 /* Application will need to do Cache_Flush(1) and Cache_Read_Enable(1) */
234 Cache_Read_Enable(core);
235 #else
236 cache_hal_enable(CACHE_TYPE_ALL);
237 #endif /* CONFIG_SOC_SERIES_ESP32 */
238
239 #if !defined(CONFIG_SOC_SERIES_ESP32) && !defined(CONFIG_SOC_SERIES_ESP32S2)
240 /* Configure the Cache MMU size for instruction and rodata in flash. */
241 uint32_t cache_mmu_irom_size =
242 ((map->irom_size + CONFIG_MMU_PAGE_SIZE - 1) / CONFIG_MMU_PAGE_SIZE) *
243 sizeof(uint32_t);
244
245 /* Split the cache usage by the segment sizes */
246 Cache_Set_IDROM_MMU_Size(cache_mmu_irom_size, CACHE_DROM_MMU_MAX_END - cache_mmu_irom_size);
247 #endif
248 }
249 #endif /* !CONFIG_MCUBOOT */
250
/* First C code executed after reset / hand-off from the previous boot
 * stage: sets up the minimal CPU state (vectors, interrupts off, global
 * pointer / CPU pointer), zeroes .bss, optionally initializes hardware,
 * maps the flash XIP segments and jumps to the platform start routine.
 */
void __start(void)
{
#ifdef CONFIG_RISCV_GP

	/* Point mtvec at the ESP vector table before anything can trap. */
	__asm__ __volatile__("la t0, _esp_vector_table\n"
			     "csrw mtvec, t0\n");

	/* Disable normal interrupts. */
	csr_read_clear(mstatus, MSTATUS_MIE);

	/* Configure the global pointer register
	 * (This should be the first thing startup does, as any other piece of code could be
	 * relaxed by the linker to access something relative to __global_pointer$)
	 */
	__asm__ __volatile__(".option push\n"
			     ".option norelax\n"
			     "la gp, __global_pointer$\n"
			     ".option pop");

	z_bss_zero();

#else /* xtensa */

	extern uint32_t _init_start;

	/* Move the exception vector table to IRAM. */
	__asm__ __volatile__("wsr %0, vecbase" : : "r"(&_init_start));

	z_bss_zero();

	/* Compiler barrier: make sure the .bss clearing above is not
	 * reordered past later accesses.
	 */
	__asm__ __volatile__("" : : "g"(&__bss_start) : "memory");

	/* Disable normal interrupts. */
	__asm__ __volatile__("wsr %0, PS" : : "r"(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE));

	/* Initialize the architecture CPU pointer. Some of the
	 * initialization code wants a valid arch_current_thread() before
	 * arch_kernel_init() is invoked.
	 */
	__asm__ __volatile__("wsr.MISC0 %0; rsync" : : "r"(&_kernel.cpus[0]));

#endif /* CONFIG_RISCV_GP */

	/* Initialize hardware only during 1st boot */
#if defined(CONFIG_MCUBOOT) || defined(CONFIG_ESP_SIMPLE_BOOT)
	if (hardware_init()) {
		ESP_EARLY_LOGE(TAG, "HW init failed, aborting");
		abort();
	}
#endif

#if defined(CONFIG_ESP_SIMPLE_BOOT) || defined(CONFIG_BOOTLOADER_MCUBOOT)
	/* Map flash-resident code/rodata so the app can execute in place. */
	map_rom_segments(0, &map);

	/* Show map segments continue using same log format as during MCUboot phase */
	ESP_EARLY_LOGI(TAG, "%s segment: paddr=%08xh, vaddr=%08xh, size=%05Xh (%6d) map", "IROM",
		       map.irom_flash_offset, map.irom_map_addr, map.irom_size, map.irom_size);
	ESP_EARLY_LOGI(TAG, "%s segment: paddr=%08xh, vaddr=%08xh, size=%05Xh (%6d) map", "DROM",
		       map.drom_flash_offset, map.drom_map_addr, map.drom_size, map.drom_size);
	/* Drain the console UART before reconfiguration can garble output. */
	esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);

	/* Disable RNG entropy source as it was already used */
	soc_random_disable();

	/* Disable glitch detection as it can be falsely triggered by EMI interference */
	ana_clock_glitch_reset_config(false);

	ESP_EARLY_LOGI(TAG, "libc heap size %d kB.", libc_heap_size / 1024);

	/* Hand off to the application; does not return. */
	__esp_platform_app_start();
#endif /* CONFIG_ESP_SIMPLE_BOOT || CONFIG_BOOTLOADER_MCUBOOT */

#if defined(CONFIG_MCUBOOT)
	/* Hand off to the MCUboot bootloader path; does not return. */
	__esp_platform_mcuboot_start();
#endif
}
327