/*
 * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// The LL layer for MMU register operations

#pragma once

#include <stdbool.h>
#include <stdint.h>
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
#include "hal/mmu_types.h"


#ifdef __cplusplus
extern "C" {
#endif

/**
 * Convert MMU virtual address to linear address
 *
 * @param vaddr virtual address
 *
 * @return linear address
 */
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
    return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}

/**
 * Convert MMU linear address to virtual address
 *
 * @param laddr linear address
 * @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 *
 * @return virtual address
 */
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
    uint32_t vaddr_base = 0;
    if (vaddr_type == MMU_VADDR_DATA) {
        vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
    } else {
        vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
    }

    return vaddr_base | laddr;
}
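
/**
 * @note Usage sketch (illustrative only; the address value is hypothetical).
 * The two helpers above are inverses for any address inside the cached range
 * of the chosen bus:
 *
 * @code
 *     uint32_t vaddr = 0x42000000;                   // hypothetical cached IBUS address
 *     uint32_t laddr = mmu_ll_vaddr_to_laddr(vaddr); // keep only the linear-offset bits
 *     uint32_t back  = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
 *     // back == vaddr whenever the dropped high bits equal SOC_MMU_IBUS_VADDR_BASE
 * @endcode
 */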

/**
 * Get MMU page size
 *
 * @param mmu_id MMU ID
 *
 * @return MMU page size
 */
__attribute__((always_inline))
static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id)
{
    (void)mmu_id;
    uint32_t page_size_code = REG_GET_FIELD(EXTMEM_CACHE_CONF_MISC_REG, EXTMEM_CACHE_MMU_PAGE_SIZE);
    return (page_size_code == 0) ? MMU_PAGE_16KB : (page_size_code == 1) ? MMU_PAGE_32KB : MMU_PAGE_64KB;
}

/**
 * Set MMU page size
 *
 * @param mmu_id MMU ID
 * @param size MMU page size
 */
__attribute__((always_inline))
static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
{
    (void)mmu_id;
    uint8_t reg_val = (size == MMU_PAGE_16KB) ? 0 : (size == MMU_PAGE_32KB) ? 1 : 2;
    REG_SET_FIELD(EXTMEM_CACHE_CONF_MISC_REG, EXTMEM_CACHE_MMU_PAGE_SIZE, reg_val);
}
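
/**
 * @note Usage sketch (illustrative only): the setter and getter speak the same
 * `mmu_page_size_t` codes, so a set can be verified with a read-back. `mmu_id`
 * is ignored on this target, so 0 is passed as a placeholder.
 *
 * @code
 *     mmu_ll_set_page_size(0, MMU_PAGE_32KB);
 *     HAL_ASSERT(mmu_ll_get_page_size(0) == MMU_PAGE_32KB);
 * @endcode
 */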

/**
 * Check if the external memory vaddr region is valid
 *
 * @param mmu_id MMU ID
 * @param vaddr_start start of the virtual address
 * @param len length, in bytes
 * @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 *
 * @return
 *         True for valid
 */
__attribute__((always_inline))
static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
{
    (void)mmu_id;
    uint32_t vaddr_end = vaddr_start + len - 1;
    bool valid = false;

    if (type & MMU_VADDR_INSTRUCTION) {
        valid |= (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end));
    }

    if (type & MMU_VADDR_DATA) {
        valid |= (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
    }

    return valid;
}
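
/**
 * @note Usage sketch (illustrative only; `my_vaddr` is hypothetical). Both the
 * first and the last byte of the region must sit in the cached range, so a
 * region straddling the end of the IBUS window is rejected:
 *
 * @code
 *     if (!mmu_ll_check_valid_ext_vaddr_region(0, my_vaddr, 0x10000, MMU_VADDR_INSTRUCTION)) {
 *         // refuse the request: 64 KB starting at my_vaddr does not fit in the IBUS range
 *     }
 * @endcode
 */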

/**
 * Check if the paddr region is valid
 *
 * @param mmu_id MMU ID
 * @param paddr_start start of the physical address
 * @param len length, in bytes
 *
 * @return
 *         True for valid
 */
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
    (void)mmu_id;
    uint32_t max_paddr = mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM;
    return (paddr_start < max_paddr) &&
           (len < max_paddr) &&
           ((paddr_start + len - 1) < max_paddr);
}
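
/**
 * @note Worked example (illustrative only). The mappable physical window is
 * `page_size * MMU_MAX_PADDR_PAGE_NUM` bytes; the start, the length, and the
 * last byte of a region must all stay strictly below that bound:
 *
 * @code
 *     uint32_t window = mmu_ll_get_page_size(0) * MMU_MAX_PADDR_PAGE_NUM;
 *     bool ok  = mmu_ll_check_valid_paddr_region(0, 0x0, window / 2);  // true: fits in the window
 *     bool bad = mmu_ll_check_valid_paddr_region(0, 0x0, window + 1);  // false: longer than the window
 * @endcode
 */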

/**
 * To get the MMU table entry id to be mapped
 *
 * @param mmu_id MMU ID
 * @param vaddr virtual address to be mapped
 *
 * @return
 *         MMU table entry id
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
{
    (void)mmu_id;

    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;
    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        default:
            HAL_ASSERT(shift_code);
    }

    return ((vaddr & MMU_VADDR_MASK) >> shift_code);
}
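
/**
 * @note Worked example (hypothetical numbers): with 64 KB pages the shift is
 * 16, so `(vaddr & MMU_VADDR_MASK) >> 16` advances by one entry per 64 KB.
 * Two virtual addresses 0x30000 apart therefore land three entries apart:
 *
 * @code
 *     uint32_t base_entry = mmu_ll_get_entry_id(0, my_vaddr);            // my_vaddr is hypothetical
 *     uint32_t next_entry = mmu_ll_get_entry_id(0, my_vaddr + 0x30000);  // base_entry + 3 with 64 KB pages
 * @endcode
 */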

/**
 * Format the paddr to be mappable
 *
 * @param mmu_id MMU ID
 * @param paddr physical address to be mapped
 * @param target paddr memory target, not used
 *
 * @return
 *         mmu_val - paddr in MMU table supported format
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
    (void)mmu_id;
    (void)target;

    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;
    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        default:
            HAL_ASSERT(shift_code);
    }

    return paddr >> shift_code;
}

/**
 * Write to the MMU table to map the virtual memory and the physical memory
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 * @param mmu_val Value to be set into an MMU entry, for physical address
 * @param target MMU target physical memory.
 */
__attribute__((always_inline))
static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
{
    (void)mmu_id;
    HAL_ASSERT(target == MMU_TARGET_FLASH0);
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);

    *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = mmu_val | MMU_ACCESS_FLASH | MMU_VALID;
}
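
/**
 * @note Usage sketch (illustrative only; `my_vaddr`/`my_paddr` are hypothetical,
 * page-aligned addresses). A typical map operation chains the helpers above:
 *
 * @code
 *     uint32_t entry_id = mmu_ll_get_entry_id(0, my_vaddr);
 *     uint32_t mmu_val  = mmu_ll_format_paddr(0, my_paddr, MMU_TARGET_FLASH0);
 *     mmu_ll_write_entry(0, entry_id, mmu_val, MMU_TARGET_FLASH0);
 * @endcode
 */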

/**
 * Read the raw value from MMU table
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return MMU entry value
 */
__attribute__((always_inline))
static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);

    return *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4);
}

/**
 * Set MMU table entry as invalid
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 */
__attribute__((always_inline))
static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);

    *(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) = MMU_INVALID;
}

/**
 * Unmap all the items in the MMU table
 *
 * @param mmu_id MMU ID
 */
__attribute__((always_inline))
static inline void mmu_ll_unmap_all(uint32_t mmu_id)
{
    for (int i = 0; i < MMU_ENTRY_NUM; i++) {
        mmu_ll_set_entry_invalid(mmu_id, i);
    }
}

/**
 * Check MMU table entry value is valid
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return True for MMU entry is valid; False for invalid
 */
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);

    return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}

/**
 * Get the MMU table entry target
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return Target, see `mmu_target_t`
 */
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    (void)entry_id;
    // This MMU only maps external flash (see the assert in `mmu_ll_write_entry`),
    // so every valid entry targets flash
    return MMU_TARGET_FLASH0;
}

/**
 * Convert MMU entry ID to paddr base
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 *
 * @return paddr base
 */
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);

    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;
    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        default:
            HAL_ASSERT(shift_code);
    }

    return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << shift_code;
}

/**
 * Find the MMU table entry ID based on table map value
 * @note This function can only find the first matching entry ID. However, it is possible that a physical address
 *       is mapped to multiple virtual addresses
 *
 * @param mmu_id MMU ID
 * @param mmu_val map value to be read from MMU table standing for paddr
 * @param target physical memory target, see `mmu_target_t`
 *
 * @return MMU entry ID, -1 for invalid
 */
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
    (void)mmu_id;
    for (int i = 0; i < MMU_ENTRY_NUM; i++) {
        if (mmu_ll_check_entry_valid(mmu_id, i)) {
            if (mmu_ll_get_entry_target(mmu_id, i) == target) {
                if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
                    return i;
                }
            }
        }
    }

    return -1;
}
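
/**
 * @note Usage sketch (illustrative only; `my_paddr` is hypothetical): reverse
 * translation from a physical page to one of its virtual addresses. Only the
 * first matching entry is returned, even if the page is mapped several times.
 *
 * @code
 *     uint32_t mmu_val = mmu_ll_format_paddr(0, my_paddr, MMU_TARGET_FLASH0);
 *     int entry_id = mmu_ll_find_entry_id_based_on_map_value(0, mmu_val, MMU_TARGET_FLASH0);
 *     if (entry_id >= 0) {
 *         uint32_t vaddr = mmu_ll_entry_id_to_vaddr_base(0, (uint32_t)entry_id, MMU_VADDR_INSTRUCTION);
 *     }
 * @endcode
 */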

/**
 * Convert MMU entry ID to vaddr base
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
 * @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
 *
 * @return vaddr base
 */
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
    (void)mmu_id;
    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;

    switch (page_size) {
        case MMU_PAGE_64KB:
            shift_code = 16;
            break;
        case MMU_PAGE_32KB:
            shift_code = 15;
            break;
        case MMU_PAGE_16KB:
            shift_code = 14;
            break;
        default:
            HAL_ASSERT(shift_code);
    }
    uint32_t laddr = entry_id << shift_code;
    return mmu_ll_laddr_to_vaddr(laddr, type);
}

#ifdef __cplusplus
}
#endif