1 /*
2 * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <sys/param.h>
7 #include <stdint.h>
8 #include <stdbool.h>
9 #include "sdkconfig.h"
10 #include "esp_err.h"
11 #include "esp_attr.h"
12 #include "hal/assert.h"
13 #include "hal/mmu_hal.h"
14 #include "hal/mmu_ll.h"
15 #include "rom/cache.h"
16 #include "esp_rom_caps.h"
17
mmu_hal_init(void)18 void mmu_hal_init(void)
19 {
20 #if ESP_ROM_RAM_APP_NEEDS_MMU_INIT
21 ROM_Boot_Cache_Init();
22 #endif
23 mmu_ll_set_page_size(0, CONFIG_MMU_PAGE_SIZE);
24 mmu_hal_unmap_all();
25 }
26
/**
 * Invalidate every MMU entry on unit 0 (and unit 1 on multi-core builds).
 */
void mmu_hal_unmap_all(void)
{
    mmu_ll_unmap_all(0);
#if !CONFIG_FREERTOS_UNICORE
    // Second core has its own MMU unit
    mmu_ll_unmap_all(1);
#endif
}
34
/**
 * Convert a page count into a byte length using the page size currently
 * configured on the given MMU unit.
 *
 * @param mmu_id    MMU unit ID
 * @param page_num  Number of pages
 * @return          page_num * page_size, in bytes
 */
uint32_t mmu_hal_pages_to_bytes(uint32_t mmu_id, uint32_t page_num)
{
    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift = 0;
    if (page_size == MMU_PAGE_64KB) {
        shift = 16;
    } else if (page_size == MMU_PAGE_32KB) {
        shift = 15;
    } else if (page_size == MMU_PAGE_16KB) {
        shift = 14;
    } else {
        // Unsupported page size: shift is still 0 here, so this always trips
        HAL_ASSERT(shift);
    }
    return page_num << shift;
}
54
/**
 * Convert a byte length into a page count using the page size currently
 * configured on the given MMU unit. The result is truncated, not rounded up.
 *
 * @param mmu_id  MMU unit ID
 * @param bytes   Length in bytes
 * @return        bytes / page_size
 */
uint32_t mmu_hal_bytes_to_pages(uint32_t mmu_id, uint32_t bytes)
{
    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift = 0;
    if (page_size == MMU_PAGE_64KB) {
        shift = 16;
    } else if (page_size == MMU_PAGE_32KB) {
        shift = 15;
    } else if (page_size == MMU_PAGE_16KB) {
        shift = 14;
    } else {
        // Unsupported page size: shift is still 0 here, so this always trips
        HAL_ASSERT(shift);
    }
    return bytes >> shift;
}
74
/**
 * Map a physical memory region to a virtual address region.
 *
 * @param mmu_id    MMU unit ID
 * @param mem_type  Physical memory target (e.g. flash / PSRAM)
 * @param vaddr     Start virtual address, must be page-aligned
 * @param paddr     Start physical address, must be page-aligned
 * @param len       Requested length in bytes
 * @param out_len   Actual mapped length (len rounded up to whole pages)
 */
void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr, uint32_t paddr, uint32_t len, uint32_t *out_len)
{
    uint32_t page_size = mmu_hal_pages_to_bytes(mmu_id, 1);
    HAL_ASSERT((vaddr % page_size) == 0);
    HAL_ASSERT((paddr % page_size) == 0);
    HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, len));
    HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, len, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));

    // Round up to whole pages and report the real mapped size back to the caller
    uint32_t pages = (len + page_size - 1) / page_size;
    *out_len = mmu_hal_pages_to_bytes(mmu_id, pages);

    // Physical address in the unit-specific format the MMU expects;
    // incrementing it by 1 advances one page
    uint32_t mmu_val = mmu_ll_format_paddr(mmu_id, paddr, mem_type);
    for (uint32_t i = 0; i < pages; i++) {
        uint32_t entry = mmu_ll_get_entry_id(mmu_id, vaddr);
        mmu_ll_write_entry(mmu_id, entry, mmu_val, mem_type);
        vaddr += page_size;
        mmu_val++;
    }
}
98
/**
 * Unmap a virtual address region. The length is rounded up to whole pages.
 *
 * @param mmu_id  MMU unit ID
 * @param vaddr   Start virtual address, must be page-aligned
 * @param len     Length in bytes
 */
void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len)
{
    uint32_t page_size = mmu_hal_pages_to_bytes(mmu_id, 1);
    HAL_ASSERT((vaddr % page_size) == 0);
    HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, len, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));

    uint32_t pages = (len + page_size - 1) / page_size;
    for (uint32_t i = 0; i < pages; i++) {
        uint32_t entry = mmu_ll_get_entry_id(mmu_id, vaddr);
        mmu_ll_set_entry_invalid(mmu_id, entry);
        vaddr += page_size;
    }
}
114
mmu_hal_vaddr_to_paddr(uint32_t mmu_id,uint32_t vaddr,uint32_t * out_paddr,mmu_target_t * out_target)115 bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target)
116 {
117 HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(mmu_id, vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION));
118 uint32_t entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
119 if (!mmu_ll_check_entry_valid(mmu_id, entry_id)) {
120 return false;
121 }
122
123 uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
124 uint32_t offset = (uint32_t)vaddr % page_size_in_bytes;
125
126 *out_target = mmu_ll_get_entry_target(mmu_id, entry_id);
127 uint32_t paddr_base = mmu_ll_entry_id_to_paddr_base(mmu_id, entry_id);
128 *out_paddr = paddr_base | offset;
129
130 return true;
131 }
132
mmu_hal_paddr_to_vaddr(uint32_t mmu_id,uint32_t paddr,mmu_target_t target,mmu_vaddr_t type,uint32_t * out_vaddr)133 bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
134 {
135 HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, 1));
136
137 uint32_t mmu_val = mmu_ll_format_paddr(mmu_id, paddr, target);
138 int entry_id = mmu_ll_find_entry_id_based_on_map_value(mmu_id, mmu_val, target);
139 if (entry_id == -1) {
140 return false;
141 }
142
143 uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
144 uint32_t offset = paddr % page_size_in_bytes;
145 uint32_t vaddr_base = mmu_ll_entry_id_to_vaddr_base(mmu_id, entry_id, type);
146 if (vaddr_base == 0) {
147 return false;
148 }
149 *out_vaddr = vaddr_base | offset;
150
151 return true;
152 }
153
mmu_hal_check_valid_ext_vaddr_region(uint32_t mmu_id,uint32_t vaddr_start,uint32_t len,mmu_vaddr_t type)154 bool mmu_hal_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t vaddr_start, uint32_t len, mmu_vaddr_t type)
155 {
156 return mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr_start, len, type);
157 }
158