/*
 * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// The LL layer for MMU register operations

#pragma once

#include "soc/spi_mem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
#include "hal/mmu_types.h"
#include "hal/efuse_ll.h"


#ifdef __cplusplus
extern "C" {
#endif
/**
 * Convert MMU virtual address to linear address
 *
 * @param vaddr virtual address
 *
 * @return linear address
 */
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
    return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}

/**
 * Convert MMU linear address to virtual address
 *
 * @param laddr linear address
 * @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 *
 * @return virtual address
 */
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
    (void)vaddr_type;
    //On ESP32C6, I/D share the same vaddr range
    return SOC_MMU_IBUS_VADDR_BASE | laddr;
}
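
/*
 * Example (illustrative, not part of the original header): on ESP32-C6 the two
 * converters above are a mask/OR pair over the shared I/D cache range, so an
 * in-range flash-cache virtual address survives a round trip. `some_vaddr` is
 * a hypothetical address assumed to lie inside the cached window.
 *
 *     uint32_t some_vaddr = 0x42000000;                      // assumed in-range vaddr
 *     uint32_t laddr = mmu_ll_vaddr_to_laddr(some_vaddr);    // keep only the linear offset
 *     uint32_t vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
 *     // vaddr == some_vaddr for addresses inside the shared I/D cache range
 */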

/**
 * Check if cache (flash) encryption is enabled
 *
 * @return true if flash encryption is enabled, false otherwise
 */
__attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void)
{
    unsigned cnt = efuse_ll_get_flash_crypt_cnt();
    // 3 bits wide, any odd number - 1 or 3 - bits set means encryption is on
    cnt = ((cnt >> 2) ^ (cnt >> 1) ^ cnt) & 0x1;
    return (cnt == 1);
}
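
/*
 * Worked example for the parity check above (illustrative): the counter is
 * 3 bits wide, so a raw value of 0b001 or 0b111 (odd number of bits set)
 * XORs down to 1 and encryption is reported enabled, while 0b000 or 0b011
 * (even number of bits set) XORs down to 0 and it is reported disabled.
 */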

/**
 * Get MMU page size
 *
 * @param mmu_id MMU ID
 *
 * @return MMU page size code
 */
__attribute__((always_inline))
static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id)
{
    (void)mmu_id;
    uint32_t page_size_code = REG_GET_FIELD(SPI_MEM_MMU_POWER_CTRL_REG(0), SPI_MEM_MMU_PAGE_SIZE);
    return (page_size_code == 0) ? MMU_PAGE_64KB :
           (page_size_code == 1) ? MMU_PAGE_32KB :
           (page_size_code == 2) ? MMU_PAGE_16KB :
           MMU_PAGE_8KB;
}

/**
 * Set MMU page size
 *
 * @param mmu_id MMU ID
 * @param size MMU page size
 */
__attribute__((always_inline))
static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
{
    (void)mmu_id;
    uint8_t reg_val = (size == MMU_PAGE_64KB) ? 0 :
                      (size == MMU_PAGE_32KB) ? 1 :
                      (size == MMU_PAGE_16KB) ? 2 :
                      (size == MMU_PAGE_8KB) ? 3 : 0;
    REG_SET_FIELD(SPI_MEM_MMU_POWER_CTRL_REG(0), SPI_MEM_MMU_PAGE_SIZE, reg_val);
}
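
/*
 * Usage sketch (illustrative, not part of the original header): the page size
 * code written by mmu_ll_set_page_size() is the same code read back by
 * mmu_ll_get_page_size(), and the mmu_id argument is ignored on this target.
 *
 *     mmu_ll_set_page_size(0, MMU_PAGE_32KB);
 *     HAL_ASSERT(mmu_ll_get_page_size(0) == MMU_PAGE_32KB);
 */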

/**
 * Check if the external memory vaddr region is valid
 *
 * @param mmu_id MMU ID
 * @param vaddr_start start of the virtual address
 * @param len length, in bytes
 * @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 *
 * @return
 *         True for valid
 */
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
    (void)mmu_id;
    (void)type;
    uint32_t vaddr_end = vaddr_start + len - 1;
    return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}

/**
 * Check if the paddr region is valid
 *
 * @param mmu_id MMU ID
 * @param paddr_start start of the physical address
 * @param len length, in bytes
 *
 * @return
 *         True for valid
 */
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
    (void)mmu_id;
    return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
           (len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
           ((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
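
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * guard before mapping, combining the vaddr and paddr range checks above.
 * `vaddr`, `paddr` and `size` are hypothetical caller-supplied values.
 *
 *     HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(0, vaddr, size, MMU_VADDR_DATA));
 *     HAL_ASSERT(mmu_ll_check_valid_paddr_region(0, paddr, size));
 */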

/**
 * To get the MMU table entry id to be mapped
 *
 * @param mmu_id MMU ID
 * @param vaddr virtual address to be mapped
 *
 * @return
 *         MMU table entry id
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
{
    (void)mmu_id;
    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;
    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        case MMU_PAGE_8KB:
            shift_code = 13;
            break;
        default:
            HAL_ASSERT(shift_code);
    }
    return ((vaddr & MMU_VADDR_MASK) >> shift_code);
}
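
/*
 * Worked example (illustrative): with the default 64 KB pages the shift code
 * is 16, so a virtual address one page above the cache base, e.g. 0x42010000,
 * masks down to an offset of 0x10000 and yields entry ID 1 (assuming
 * MMU_VADDR_MASK keeps only the offset within the cached window).
 */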

/**
 * Format the paddr to be mappable
 *
 * @param mmu_id MMU ID
 * @param paddr physical address to be mapped
 * @param target paddr memory target, not used
 *
 * @return
 *         mmu_val - paddr in MMU table supported format
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
    (void)mmu_id;
    (void)target;
    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;
    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        case MMU_PAGE_8KB:
            shift_code = 13;
            break;
        default:
            HAL_ASSERT(shift_code);
    }
    return paddr >> shift_code;
}

/**
 * Write to the MMU table to map the virtual memory and the physical memory
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 * @param mmu_val Value to be set into an MMU entry, for physical address
 * @param target MMU target physical memory.
 */
__attribute__((always_inline)) static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
{
    (void)mmu_id;
    (void)target;
    uint32_t mmu_raw_value;
    if (mmu_ll_cache_encryption_enabled()) {
        mmu_val |= MMU_SENSITIVE;
    }

    mmu_raw_value = mmu_val | MMU_VALID;
    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
    REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), mmu_raw_value);
}
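
/*
 * Usage sketch (illustrative, not part of the original header): mapping one
 * page of flash at a given virtual address. `vaddr` and `paddr` are
 * hypothetical page-aligned addresses inside the windows validated above.
 *
 *     uint32_t entry_id = mmu_ll_get_entry_id(0, vaddr);
 *     uint32_t mmu_val  = mmu_ll_format_paddr(0, paddr, MMU_TARGET_FLASH0);
 *     mmu_ll_write_entry(0, entry_id, mmu_val, MMU_TARGET_FLASH0);
 */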

/**
 * Read the raw value from MMU table
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return MMU entry value read from the MMU table, 0 if the entry is invalid
 */
__attribute__((always_inline)) static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    uint32_t mmu_raw_value;
    uint32_t ret;
    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
    mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
    if (mmu_ll_cache_encryption_enabled()) {
        mmu_raw_value &= ~MMU_SENSITIVE;
    }
    if (!(mmu_raw_value & MMU_VALID)) {
        return 0;
    }
    ret = mmu_raw_value & MMU_VALID_VAL_MASK;
    return ret;
}

/**
 * Set MMU table entry as invalid
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 */
__attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
    REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), MMU_INVALID);
}

/**
 * Unmap all the items in the MMU table
 *
 * @param mmu_id MMU ID
 */
__attribute__((always_inline))
static inline void mmu_ll_unmap_all(uint32_t mmu_id)
{
    for (int i = 0; i < MMU_ENTRY_NUM; i++) {
        mmu_ll_set_entry_invalid(mmu_id, i);
    }
}

/**
 * Check if the MMU table entry value is valid
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return True if the MMU entry is valid; false otherwise
 */
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);

    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
    return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID) ? true : false;
}

/**
 * Get the MMU table entry target
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return Target, see `mmu_target_t`
 */
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    (void)entry_id;
    return MMU_TARGET_FLASH0;
}

/**
 * Convert MMU entry ID to paddr base
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return paddr base
 */
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);

    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;
    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        case MMU_PAGE_8KB:
            shift_code = 13;
            break;
        default:
            HAL_ASSERT(shift_code);
    }

    REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
    return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) << shift_code;
}
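
/*
 * Usage sketch (illustrative, not part of the original header): translating an
 * already-mapped virtual address back to the physical page it points at.
 * `vaddr` is a hypothetical mapped flash-cache address.
 *
 *     uint32_t entry_id = mmu_ll_get_entry_id(0, vaddr);
 *     if (mmu_ll_check_entry_valid(0, entry_id)) {
 *         uint32_t paddr_base = mmu_ll_entry_id_to_paddr_base(0, entry_id);
 *     }
 */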

/**
 * Find the MMU table entry ID based on table map value
 * @note This function only returns the first matching entry ID; a physical address
 *       may be mapped to multiple virtual addresses
 *
 * @param mmu_id MMU ID
 * @param mmu_val map value to be read from MMU table standing for paddr
 * @param target physical memory target, see `mmu_target_t`
 *
 * @return MMU entry ID, -1 for invalid
 */
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
    (void)mmu_id;
    for (int i = 0; i < MMU_ENTRY_NUM; i++) {
        if (mmu_ll_check_entry_valid(mmu_id, i)) {
            if (mmu_ll_get_entry_target(mmu_id, i) == target) {
                REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), i);
                if ((REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) == mmu_val) {
                    return i;
                }
            }
        }
    }

    return -1;
}
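
/*
 * Usage sketch (illustrative, not part of the original header): reverse lookup
 * from a physical address to the first virtual address it is mapped at.
 * `paddr` is a hypothetical page-aligned flash address.
 *
 *     uint32_t mmu_val = mmu_ll_format_paddr(0, paddr, MMU_TARGET_FLASH0);
 *     int entry_id = mmu_ll_find_entry_id_based_on_map_value(0, mmu_val, MMU_TARGET_FLASH0);
 *     if (entry_id >= 0) {
 *         uint32_t vaddr = mmu_ll_entry_id_to_vaddr_base(0, (uint32_t)entry_id, MMU_VADDR_DATA);
 *     }
 */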

/**
 * Convert MMU entry ID to vaddr base
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 * @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 *
 * @return vaddr base
 */
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
    (void)mmu_id;
    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;

    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        case MMU_PAGE_8KB:
            shift_code = 13;
            break;
        default:
            HAL_ASSERT(shift_code);
    }
    uint32_t laddr = entry_id << shift_code;
    return mmu_ll_laddr_to_vaddr(laddr, type);
}

#ifdef __cplusplus
}
#endif