/*
 * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include "sdkconfig.h"
#include "soc/soc.h"
#include "esp_cpu.h"
#include "esp_fault.h"

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE 0x0
#define CONDITIONAL_R PMP_R
#define CONDITIONAL_RX (PMP_R | PMP_X)
#define CONDITIONAL_RW (PMP_R | PMP_W)
#define CONDITIONAL_RWX (PMP_R | PMP_W | PMP_X)
#else
// With L bit set
#define CONDITIONAL_NONE NONE
#define CONDITIONAL_R R
#define CONDITIONAL_RX RX
#define CONDITIONAL_RW RW
#define CONDITIONAL_RWX RWX
#endif
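/*
 * Rationale: setting the PMP lock (L) bit enforces the R/W/X permissions even for
 * machine-mode accesses and prevents the entry from being modified again until the
 * next reset. The bootloader therefore uses the unlocked variants above so that the
 * application can later reconfigure the same PMP entries (see the configuration
 * scenarios described in esp_cpu_configure_region_protection() below).
 */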

#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
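/*
 * Illustration (assuming, purely for the sake of the example, an MMU page size of 0x10000 / 64 KB):
 *   ALIGN_UP_TO_MMU_PAGE_SIZE(0x42000001)   -> 0x42010000
 *   ALIGN_DOWN_TO_MMU_PAGE_SIZE(0x4200FFFF) -> 0x42000000
 * Both macros rely on SOC_MMU_PAGE_SIZE being a power of two, which makes the
 * bitwise masking equivalent to rounding to a page boundary.
 */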

static void esp_cpu_configure_invalid_regions(void)
{
    const unsigned PMA_NONE = PMA_L | PMA_EN;
    __attribute__((unused)) const unsigned PMA_RW = PMA_L | PMA_EN | PMA_R | PMA_W;
    __attribute__((unused)) const unsigned PMA_RX = PMA_L | PMA_EN | PMA_R | PMA_X;
    __attribute__((unused)) const unsigned PMA_RWX = PMA_L | PMA_EN | PMA_R | PMA_W | PMA_X;

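    /*
     * Each gap below is blocked with a pair of entries: the first entry programs the lower
     * bound of the gap, and the following PMA_TOR entry blocks the range from that bound up
     * to its own address. This mirrors the standard RISC-V PMP top-of-range (TOR) convention,
     * which the PMA_ENTRY_SET_TOR helper is assumed to follow.
     */
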
    // 1. Gap at bottom of address space
    PMA_ENTRY_SET_TOR(0, SOC_CPU_SUBSYSTEM_LOW, PMA_TOR | PMA_NONE);

    // 2. Gap between CPU subsystem region & IROM
    PMA_ENTRY_SET_TOR(1, SOC_CPU_SUBSYSTEM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);

    // 3. Gap between ROM & RAM
    PMA_ENTRY_SET_TOR(3, SOC_DROM_MASK_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(4, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 4. Gap between DRAM and I_Cache
    PMA_ENTRY_SET_TOR(5, SOC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(6, SOC_IROM_LOW, PMA_TOR | PMA_NONE);

    // 5. Gap between D_Cache & LP_RAM
    PMA_ENTRY_SET_TOR(7, SOC_DROM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(8, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 6. Gap between LP memory & peripheral addresses
    PMA_ENTRY_SET_TOR(9, SOC_RTC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(10, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);

    // 7. End of address space
    PMA_ENTRY_SET_TOR(11, SOC_PERIPHERAL_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(12, UINT32_MAX, PMA_TOR | PMA_NONE);
}

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) The ESP32-C6 CPU doesn't support overlapping PMP regions.
     *
     * 2) The ESP32-C6 supports 16 PMA regions, so we use this feature to block all the invalid address ranges.
     *
     * 3) We use a combination of NAPOT (Naturally Aligned Power Of Two) and TOR (top of range)
     *    entries to map all the valid address space, bottom to top. This leaves us with some extra PMP entries
     *    which can be used to provide more granular access control.
     *
     * 4) Entries are grouped in order, with some static asserts to help verify that everything is
     *    correct.
     */
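    /* For reference, a NAPOT entry encodes both base and size in a single pmpaddr value.
     * Assuming PMPADDR_NAPOT() follows the standard RISC-V encoding, a region of 2^n bytes
     * starting at `base` (with `base` aligned to its size) is written as:
     *
     *     pmpaddr = (base >> 2) | (((1 << n) >> 3) - 1);
     *
     * e.g. a 32 KB region at 0x40800000 encodes as 0x10200FFF. TOR entries instead take the
     * previous entry's pmpaddr as the region base and their own pmpaddr as the exclusive top.
     */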

    /* There are 4 configuration scenarios for SRAM:
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure the entries again for the application.
     *      We configure PMP to cover the entire valid IRAM and DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled:
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be written to
     *      and the DRAM region cannot be executed. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even for machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled:
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid
     *      address ranges succeed. For that we set PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even for machine mode.
     *
     * 4. CPU is in OCD debug mode:
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even for machine mode.
     */
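    /* Under scenario 2 the split configured further below (PMP entries 5-7) is intended to
     * end up roughly as follows, with _iram_end acting as the IRAM/DRAM boundary:
     *
     *     [SOC_IRAM_LOW, _iram_end)   -> RX  (code: executable, not writable)
     *     [_iram_end, SOC_DRAM_HIGH)  -> RW  (data: writable, not executable)
     */
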
    const unsigned NONE = PMP_L;
    __attribute__((unused)) const unsigned R = PMP_L | PMP_R;
    const unsigned RW = PMP_L | PMP_R | PMP_W;
    const unsigned RX = PMP_L | PMP_R | PMP_X;
    const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;

    //
    // Configure all the invalid address regions using PMA
    //
    esp_cpu_configure_invalid_regions();

    //
    // Configure all the valid address regions using PMP
    //

    // 1. CPU Subsystem region - contains debug mode code and interrupt config registers
    const uint32_t pmpaddr0 = PMPADDR_NAPOT(SOC_CPU_SUBSYSTEM_LOW, SOC_CPU_SUBSYSTEM_HIGH);
    PMP_ENTRY_SET(0, pmpaddr0, PMP_NAPOT | RWX);
    _Static_assert(SOC_CPU_SUBSYSTEM_LOW < SOC_CPU_SUBSYSTEM_HIGH, "Invalid CPU subsystem region");

    // 2.1 I/D-ROM
    PMP_ENTRY_SET(1, SOC_IROM_MASK_LOW, NONE);
    PMP_ENTRY_SET(2, SOC_IROM_MASK_HIGH, PMP_TOR | RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I/D-ROM region");

    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI (fault injection) check that the CPU is really in OCD mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
    } else {
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
        extern int _iram_end;
        // 5. IRAM and DRAM
        /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits.
         * The bootloader might have granted extra permissions and those would not be cleared otherwise.
         */
        PMP_ENTRY_CFG_RESET(5);
        PMP_ENTRY_CFG_RESET(6);
        PMP_ENTRY_CFG_RESET(7);
        PMP_ENTRY_SET(5, SOC_IRAM_LOW, NONE);
        PMP_ENTRY_SET(6, (int)&_iram_end, PMP_TOR | RX);
        PMP_ENTRY_SET(7, SOC_DRAM_HIGH, PMP_TOR | RW);
#else
        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | CONDITIONAL_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
#endif
    }

#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
    extern int _instruction_reserved_end;
    extern int _rodata_reserved_end;

    const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
    const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));

    // 4. I_Cache / D_Cache (flash)
    PMP_ENTRY_CFG_RESET(8);
    PMP_ENTRY_CFG_RESET(9);
    PMP_ENTRY_CFG_RESET(10);
    PMP_ENTRY_SET(8, SOC_IROM_LOW, NONE);
    PMP_ENTRY_SET(9, irom_resv_end, PMP_TOR | RX);
    PMP_ENTRY_SET(10, drom_resv_end, PMP_TOR | R);
#else
    // 4. I_Cache / D_Cache (flash)
    const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | CONDITIONAL_RX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I/D_Cache region");
#endif

    // 6. LP memory
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
    extern int _rtc_text_end;
    /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits.
     * The bootloader might have granted extra permissions and those would not be cleared otherwise.
     */
    PMP_ENTRY_CFG_RESET(11);
    PMP_ENTRY_CFG_RESET(12);
    PMP_ENTRY_CFG_RESET(13);
    PMP_ENTRY_CFG_RESET(14);
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
#if CONFIG_ULP_COPROC_RESERVE_MEM
    // The first part of LP memory is reserved for the ULP coprocessor
    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
#else // CONFIG_ULP_COPROC_RESERVE_MEM
    // Repeat the previous entry so that the next TOR entry gets the correct base address
    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW, NONE);
#endif // !CONFIG_ULP_COPROC_RESERVE_MEM
    PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
    PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#else
    const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
    PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif

    // 7. Peripheral addresses
    const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
    PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}