1 /*
2 * Copyright (c) 2022 Intel Corporation
3 * SPDX-License-Identifier: Apache-2.0
4 */
5 #include <zephyr/kernel.h>
6 #include <zephyr/cache.h>
7 #include <zephyr/arch/xtensa/arch.h>
8 #include <zephyr/arch/xtensa/xtensa_mmu.h>
9 #include <zephyr/linker/linker-defs.h>
10 #include <zephyr/logging/log.h>
11 #include <zephyr/kernel/mm.h>
12 #include <zephyr/toolchain.h>
13 #include <xtensa/corebits.h>
14 #include <xtensa_mmu_priv.h>
15
16 #include <kernel_arch_func.h>
17 #include <mmu.h>
18
19 /* Skip TLB IPI when updating page tables.
20 * This allows us to send IPI only after the last
21 * changes of a series.
22 */
23 #define OPTION_NO_TLB_IPI BIT(0)
24
/* Number of entries in a level 1 page table. Each entry either
 * points to a level 2 page table or is marked illegal.
 */
28 #define XTENSA_L1_PAGE_TABLE_ENTRIES 1024U
29
30 /* Size of level 1 page table.
31 */
32 #define XTENSA_L1_PAGE_TABLE_SIZE (XTENSA_L1_PAGE_TABLE_ENTRIES * sizeof(uint32_t))
33
/* Number of entries in a level 2 page table. Each entry maps one
 * 4 kB page.
 */
37 #define XTENSA_L2_PAGE_TABLE_ENTRIES 1024U
38
39 /* Size of level 2 page table.
40 */
41 #define XTENSA_L2_PAGE_TABLE_SIZE (XTENSA_L2_PAGE_TABLE_ENTRIES * sizeof(uint32_t))
42
43 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
44
45 BUILD_ASSERT(CONFIG_MMU_PAGE_SIZE == 0x1000,
46 "MMU_PAGE_SIZE value is invalid, only 4 kB pages are supported\n");
47
48 /*
49 * Level 1 page table has to be 4Kb to fit into one of the wired entries.
50 * All entries are initialized as INVALID, so an attempt to read an unmapped
51 * area will cause a double exception.
52 *
53 * Each memory domain contains its own l1 page table. The kernel l1 page table is
54 * located at the index 0.
55 */
56 static uint32_t l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES][XTENSA_L1_PAGE_TABLE_ENTRIES]
57 __aligned(KB(4));
58
59
60 /*
61 * That is an alias for the page tables set used by the kernel.
62 */
63 uint32_t *xtensa_kernel_ptables = (uint32_t *)l1_page_table[0];
64
65 /*
66 * Each table in the level 2 maps a 4Mb memory range. It consists of 1024 entries each one
67 * covering a 4Kb page.
68 */
69 static uint32_t l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES][XTENSA_L2_PAGE_TABLE_ENTRIES]
70 __aligned(KB(4));
71
72 /*
73 * This additional variable tracks which l1 tables are in use. This is kept separated from
74 * the tables to keep alignment easier.
75 *
76 * @note: The first bit is set because it is used for the kernel page tables.
77 */
78 static ATOMIC_DEFINE(l1_page_table_track, CONFIG_XTENSA_MMU_NUM_L1_TABLES);
79
80 /*
81 * This additional variable tracks which l2 tables are in use. This is kept separated from
82 * the tables to keep alignment easier.
83 */
84 static ATOMIC_DEFINE(l2_page_tables_track, CONFIG_XTENSA_MMU_NUM_L2_TABLES);
85
86 /*
87 * Protects xtensa_domain_list and serializes access to page tables.
88 */
89 static struct k_spinlock xtensa_mmu_lock;
90
91 #ifdef CONFIG_USERSPACE
92
93 /*
94 * Each domain has its own ASID. ASID can go through 1 (kernel) to 255.
95 * When a TLB entry matches, the hw will check the ASID in the entry and finds
96 * the correspondent position in the RASID register. This position will then be
97 * compared with the current ring (CRING) to check the permission.
98 */
99 static uint8_t asid_count = 3;
100
101 /*
102 * List with all active and initialized memory domains.
103 */
104 static sys_slist_t xtensa_domain_list;
105 #endif /* CONFIG_USERSPACE */
106
107 extern char _heap_end[];
108 extern char _heap_start[];
109 /*
110 * Static definition of all code & data memory regions of the
111 * current Zephyr image. This information must be available &
112 * processed upon MMU initialization.
113 */
114
115 static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
116 /*
117 * Mark the zephyr execution regions (data, bss, noinit, etc.)
118 * cacheable, read / write and non-executable
119 */
120 {
121 /* This includes .data, .bss and various kobject sections. */
122 .start = (uint32_t)_image_ram_start,
123 .end = (uint32_t)_image_ram_end,
124 #ifdef CONFIG_XTENSA_RPO_CACHE
125 .attrs = XTENSA_MMU_PERM_W,
126 #else
127 .attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
128 #endif
129 .name = "data",
130 },
131 #if K_HEAP_MEM_POOL_SIZE > 0
132 /* System heap memory */
133 {
134 .start = (uint32_t)_heap_start,
135 .end = (uint32_t)_heap_end,
136 #ifdef CONFIG_XTENSA_RPO_CACHE
137 .attrs = XTENSA_MMU_PERM_W,
138 #else
139 .attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
140 #endif
141 .name = "heap",
142 },
143 #endif
144 /* Mark text segment cacheable, read only and executable */
145 {
146 .start = (uint32_t)__text_region_start,
147 .end = (uint32_t)__text_region_end,
148 .attrs = XTENSA_MMU_PERM_X | XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
149 .name = "text",
150 },
151 /* Mark rodata segment cacheable, read only and non-executable */
152 {
153 .start = (uint32_t)__rodata_region_start,
154 .end = (uint32_t)__rodata_region_end,
155 .attrs = XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
156 .name = "rodata",
157 },
158 };
159
thread_page_tables_get(const struct k_thread * thread)160 static inline uint32_t *thread_page_tables_get(const struct k_thread *thread)
161 {
162 #ifdef CONFIG_USERSPACE
163 if ((thread->base.user_options & K_USER) != 0U) {
164 return thread->arch.ptables;
165 }
166 #endif
167
168 return xtensa_kernel_ptables;
169 }
170
171 /**
172 * @brief Check if the page table entry is illegal.
173 *
174 * @param[in] Page table entry.
175 */
is_pte_illegal(uint32_t pte)176 static inline bool is_pte_illegal(uint32_t pte)
177 {
178 uint32_t attr = pte & XTENSA_MMU_PTE_ATTR_MASK;
179
180 /*
181 * The ISA manual states only 12 and 14 are illegal values.
182 * 13 and 15 are not. So we need to be specific than simply
183 * testing if bits 2 and 3 are set.
184 */
185 return (attr == 12) || (attr == 14);
186 }
187
188 /*
189 * @brief Initialize all page table entries to be illegal.
190 *
191 * @param[in] Pointer to page table.
192 * @param[in] Number of page table entries in the page table.
193 */
init_page_table(uint32_t * ptable,size_t num_entries)194 static void init_page_table(uint32_t *ptable, size_t num_entries)
195 {
196 int i;
197
198 for (i = 0; i < num_entries; i++) {
199 ptable[i] = XTENSA_MMU_PTE_ILLEGAL;
200 }
201 }
202
alloc_l2_table(void)203 static inline uint32_t *alloc_l2_table(void)
204 {
205 uint16_t idx;
206
207 for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L2_TABLES; idx++) {
208 if (!atomic_test_and_set_bit(l2_page_tables_track, idx)) {
209 return (uint32_t *)&l2_page_tables[idx];
210 }
211 }
212
213 return NULL;
214 }
215
/**
 * @brief Map a physical range into the kernel page tables (identity map).
 *
 * Maps each 4 kB page in [start, end) at the same virtual address in
 * xtensa_kernel_ptables, allocating L2 tables on demand. Asserts if the
 * L2 table pool is exhausted.
 *
 * @param start Start address of the range.
 * @param end End address of the range (exclusive).
 * @param attrs PTE attributes. XTENSA_MMU_MAP_SHARED selects the shared
 *              ring; XTENSA_MMU_PTE_ATTR_ORIGINAL requests that the
 *              attributes be preserved in the PTE software bits.
 */
static void map_memory_range(const uint32_t start, const uint32_t end,
			     const uint32_t attrs)
{
	uint32_t page, *table;
	bool shared = !!(attrs & XTENSA_MMU_MAP_SHARED);
	/* Keep a copy of the attributes in the PTE software field only
	 * when the caller explicitly asked for it.
	 */
	uint32_t sw_attrs = (attrs & XTENSA_MMU_PTE_ATTR_ORIGINAL) == XTENSA_MMU_PTE_ATTR_ORIGINAL ?
		attrs : 0;

	for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) {
		uint32_t pte = XTENSA_MMU_PTE(page,
					      shared ? XTENSA_MMU_SHARED_RING :
					      XTENSA_MMU_KERNEL_RING,
					      sw_attrs, attrs);
		uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
		uint32_t l1_pos = XTENSA_MMU_L1_POS(page);

		/* First page mapped in this 4 MB region: allocate and
		 * install a fresh L2 table for it.
		 */
		if (is_pte_illegal(xtensa_kernel_ptables[l1_pos])) {
			table = alloc_l2_table();

			__ASSERT(table != NULL, "There is no l2 page table available to "
				"map 0x%08x\n", page);

			init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);

			xtensa_kernel_ptables[l1_pos] =
				XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
					       sw_attrs, XTENSA_MMU_PAGE_TABLE_ATTR);
		}

		table = (uint32_t *)(xtensa_kernel_ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
		table[l2_pos] = pte;
	}
}
249
/**
 * @brief Map a range, handling the double-mapped (cached/uncached) case.
 *
 * With CONFIG_XTENSA_MMU_DOUBLE_MAP the same physical range is visible
 * at a cached and an uncached virtual alias: the cached alias is mapped
 * write-back cacheable and the uncached alias with the cache attribute
 * bits cleared. Otherwise the range is mapped once, as given.
 */
static void map_memory(const uint32_t start, const uint32_t end,
		       const uint32_t attrs)
{
#ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
	uint32_t uc_attrs = attrs & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK;
	uint32_t c_attrs = attrs | XTENSA_MMU_CACHED_WB;

	if (sys_cache_is_ptr_uncached((void *)start)) {
		map_memory_range(start, end, uc_attrs);

		map_memory_range(POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)start)),
			POINTER_TO_UINT(sys_cache_cached_ptr_get((void *)end)), c_attrs);
	} else if (sys_cache_is_ptr_cached((void *)start)) {
		map_memory_range(start, end, c_attrs);

		map_memory_range(POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)start)),
			POINTER_TO_UINT(sys_cache_uncached_ptr_get((void *)end)), uc_attrs);
	} else
#endif
	{
		/* Address is in neither alias region (or double mapping is
		 * disabled): single mapping with the attributes as given.
		 */
		map_memory_range(start, end, attrs);
	}
}
273
/**
 * @brief Build the kernel page tables.
 *
 * Populates the kernel L1/L2 page tables from the static Zephyr image
 * ranges and the SoC-provided ranges, then maps the page table arrays
 * themselves. Idempotent: only the first call has any effect.
 */
static void xtensa_init_page_tables(void)
{
	volatile uint8_t entry;
	static bool already_inited;

	if (already_inited) {
		return;
	}
	already_inited = true;

	init_page_table(xtensa_kernel_ptables, XTENSA_L1_PAGE_TABLE_ENTRIES);
	/* L1 table 0 is the kernel's; mark it as in use. */
	atomic_set_bit(l1_page_table_track, 0);

	/* Map the Zephyr image regions (data, heap, text, rodata),
	 * remembering the original attributes in the PTE software bits.
	 */
	for (entry = 0; entry < ARRAY_SIZE(mmu_zephyr_ranges); entry++) {
		const struct xtensa_mmu_range *range = &mmu_zephyr_ranges[entry];

		map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL);
	}

	/* Map any additional ranges required by the SoC. */
	for (entry = 0; entry < xtensa_soc_mmu_ranges_num; entry++) {
		const struct xtensa_mmu_range *range = &xtensa_soc_mmu_ranges[entry];

		map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL);
	}

	/* Finally, the direct-mapped pages used in the page tables
	 * must be fixed up to use the same cache attribute (but these
	 * must be writable, obviously). They shouldn't be left at
	 * the default.
	 */
	map_memory_range((uint32_t) &l1_page_table[0],
			 (uint32_t) &l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES],
			 XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W);
	map_memory_range((uint32_t) &l2_page_tables[0],
			 (uint32_t) &l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES],
			 XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W);

	/* Flush all page table writes out of the data cache. */
	sys_cache_data_flush_all();
}
313
/* Weak hook invoked at the end of MMU (re)initialization. SoC code may
 * override it to perform additional per-core setup.
 */
__weak void arch_xtensa_mmu_post_init(bool is_core0)
{
	ARG_UNUSED(is_core0);
}
318
/**
 * @brief Initialize the MMU on the current CPU.
 *
 * Builds the kernel page tables (first caller only), enables paging
 * with them, and runs the SoC post-init hook.
 */
void xtensa_mmu_init(void)
{
	xtensa_init_page_tables();

	xtensa_init_paging(xtensa_kernel_ptables);

	/*
	 * This is used to determine whether we are faulting inside double
	 * exception if this is not zero. Sometimes SoC starts with this not
	 * being set to zero. So clear it during boot.
	 */
	XTENSA_WSR(ZSR_DEPC_SAVE_STR, 0);

	arch_xtensa_mmu_post_init(_current_cpu->id == 0);
}
334
/**
 * @brief Re-initialize MMU hardware, e.g. after a low-power resume.
 *
 * Re-enables paging with the kernel tables and, with userspace enabled,
 * restores the ASID/page tables of the currently running thread's
 * memory domain.
 */
void xtensa_mmu_reinit(void)
{
	/* First initialize the hardware */
	xtensa_init_paging(xtensa_kernel_ptables);

#ifdef CONFIG_USERSPACE
	struct k_thread *thread = _current_cpu->current;
	struct arch_mem_domain *domain =
			&(thread->mem_domain_info.mem_domain->arch);


	/* Set the page table for current context */
	xtensa_set_paging(domain->asid, domain->ptables);
#endif /* CONFIG_USERSPACE */

	arch_xtensa_mmu_post_init(_current_cpu->id == 0);
}
352
#ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
/* Zephyr's linker scripts for Xtensa usually put something before
 * z_mapped_start (aka .text), i.e. vecbase, so those page frames must
 * be reserved or else k_mem_map() would map them, resulting in faults.
 */
__weak void arch_reserved_pages_update(void)
{
	uintptr_t addr = CONFIG_SRAM_BASE_ADDRESS;
	int frame_idx = 0;

	/* Walk page by page from the start of SRAM up to the first
	 * mapped address, reserving each frame along the way.
	 */
	while (addr < (uintptr_t)z_mapped_start) {
		k_mem_page_frame_set(&k_mem_page_frames[frame_idx],
				     K_MEM_PAGE_FRAME_RESERVED);
		addr += CONFIG_MMU_PAGE_SIZE;
		frame_idx++;
	}
}
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */
372
/**
 * @brief Install a single 4 kB mapping into the given page table set.
 *
 * Allocates an L2 table on demand, writes the PTE, flushes the updated
 * entries out of the data cache and invalidates autorefill TLB entries.
 *
 * @param l1_table L1 page table to update.
 * @param vaddr Virtual address to map.
 * @param phys Physical address to map it to.
 * @param flags PTE attribute flags.
 * @param is_user True to map with the user ring, false for kernel ring.
 *
 * @return False if no L2 table could be allocated, true otherwise.
 */
static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
			      uint32_t flags, bool is_user)
{
	uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
	uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
	uint32_t *table;

	/* Make sure we read a fresh copy of the L1 entry, not a stale
	 * cached one possibly modified by another CPU.
	 */
	sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));

	if (is_pte_illegal(l1_table[l1_pos])) {
		table = alloc_l2_table();

		if (table == NULL) {
			return false;
		}

		init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);

		l1_table[l1_pos] = XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
						  0, XTENSA_MMU_PAGE_TABLE_ATTR);

		/* Push the new L1 entry to memory immediately. */
		sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
	}

	table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
	table[l2_pos] = XTENSA_MMU_PTE(phys, is_user ? XTENSA_MMU_USER_RING :
					     XTENSA_MMU_KERNEL_RING,
				       0, flags);

	sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0]));
	/* Drop any stale autorefill TLB entry for this address. */
	xtensa_tlb_autorefill_invalidate();

	return true;
}
407
/**
 * @brief Map one page into the kernel tables and all domain tables.
 *
 * With CONFIG_XTENSA_MMU_DOUBLE_MAP, both the cached and uncached
 * aliases of the virtual and physical addresses are mapped: the cached
 * alias write-back cacheable, the uncached alias with cache bits
 * cleared.
 *
 * @param va Virtual address of the page.
 * @param pa Physical address backing the page.
 * @param xtensa_flags PTE attribute flags.
 * @param is_user True to map with the user ring.
 */
static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags, bool is_user)
{
	bool ret;
	void *vaddr, *vaddr_uc;
	uintptr_t paddr, paddr_uc;
	uint32_t flags, flags_uc;

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
		/* Derive both the cached and uncached aliases of va. */
		if (sys_cache_is_ptr_cached(va)) {
			vaddr = va;
			vaddr_uc = sys_cache_uncached_ptr_get(va);
		} else {
			vaddr = sys_cache_cached_ptr_get(va);
			vaddr_uc = va;
		}

		/* Same for the physical address. */
		if (sys_cache_is_ptr_cached((void *)pa)) {
			paddr = pa;
			paddr_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)pa);
		} else {
			paddr = (uintptr_t)sys_cache_cached_ptr_get((void *)pa);
			paddr_uc = pa;
		}

		flags_uc = (xtensa_flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
		flags = flags_uc | XTENSA_MMU_CACHED_WB;
	} else {
		vaddr = va;
		paddr = pa;
		flags = xtensa_flags;
	}

	ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr, paddr,
				flags, is_user);
	__ASSERT(ret, "Virtual address (%p) already mapped", va);

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
		ret = l2_page_table_map(xtensa_kernel_ptables, (void *)vaddr_uc, paddr_uc,
					flags_uc, is_user);
		__ASSERT(ret, "Virtual address (%p) already mapped", vaddr_uc);
	}

#ifndef CONFIG_USERSPACE
	ARG_UNUSED(ret);
#else
	/* Propagate the new mapping to every initialized memory domain's
	 * page tables so all address spaces stay consistent.
	 */
	if (ret) {
		sys_snode_t *node;
		struct arch_mem_domain *domain;
		k_spinlock_key_t key;

		key = k_spin_lock(&z_mem_domain_lock);
		SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) {
			domain = CONTAINER_OF(node, struct arch_mem_domain, node);

			ret = l2_page_table_map(domain->ptables, (void *)vaddr, paddr,
						flags, is_user);
			__ASSERT(ret, "Virtual address (%p) already mapped for domain %p",
				 vaddr, domain);

			if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP) && ret) {
				ret = l2_page_table_map(domain->ptables,
							(void *)vaddr_uc, paddr_uc,
							flags_uc, is_user);
				__ASSERT(ret, "Virtual address (%p) already mapped for domain %p",
					 vaddr_uc, domain);
			}
		}
		k_spin_unlock(&z_mem_domain_lock, key);
	}
#endif /* CONFIG_USERSPACE */
}
479
/**
 * @brief Map a region of physical memory (arch interface).
 *
 * Translates generic K_MEM_* flags into Xtensa PTE attributes, maps the
 * region page by page under the MMU lock, then notifies other CPUs and
 * flushes the data cache. Panics on a zero-size request.
 *
 * @param virt Virtual start address.
 * @param phys Physical start address.
 * @param size Region size in bytes.
 * @param flags K_MEM_* cache/permission flags.
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	uint32_t va = (uint32_t)virt;
	uint32_t pa = (uint32_t)phys;
	uint32_t rem_size = (uint32_t)size;
	uint32_t xtensa_flags = 0;
	k_spinlock_key_t key;
	bool is_user;

	if (size == 0) {
		LOG_ERR("Cannot map physical memory at 0x%08X: invalid "
			"zero size", (uint32_t)phys);
		k_panic();
	}

	switch (flags & K_MEM_CACHE_MASK) {

	case K_MEM_CACHE_WB:
		xtensa_flags |= XTENSA_MMU_CACHED_WB;
		break;
	case K_MEM_CACHE_WT:
		xtensa_flags |= XTENSA_MMU_CACHED_WT;
		break;
	case K_MEM_CACHE_NONE:
		__fallthrough;
	default:
		break;
	}

	if ((flags & K_MEM_PERM_RW) == K_MEM_PERM_RW) {
		xtensa_flags |= XTENSA_MMU_PERM_W;
	}
	if ((flags & K_MEM_PERM_EXEC) == K_MEM_PERM_EXEC) {
		xtensa_flags |= XTENSA_MMU_PERM_X;
	}

	is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER;

	key = k_spin_lock(&xtensa_mmu_lock);

	/* Step in 4 kB increments; the BUILD_ASSERT above pins
	 * CONFIG_MMU_PAGE_SIZE to 4 kB.
	 */
	while (rem_size > 0) {
		__arch_mem_map((void *)va, pa, xtensa_flags, is_user);

		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
		pa += KB(4);
	}

#if CONFIG_MP_MAX_NUM_CPUS > 1
	/* Tell the other CPUs to refresh their TLBs. */
	xtensa_mmu_tlb_ipi();
#endif

	sys_cache_data_flush_and_invd_all();
	k_spin_unlock(&xtensa_mmu_lock, key);
}
535
/**
 * @brief Remove a single 4 kB mapping from the given page table set.
 *
 * If this was the last valid entry in its L2 table, the L2 table is
 * released back to the pool and the L1 entry marked illegal.
 *
 * @return True if page is executable (thus need to invalidate ITLB),
 * false if not.
 */
static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
{
	uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
	uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
	uint32_t *l2_table;
	uint32_t table_pos;
	bool exec;

	/* Grab a fresh copy of the L1 entry from memory. */
	sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));

	if (is_pte_illegal(l1_table[l1_pos])) {
		/* We shouldn't be unmapping an illegal entry.
		 * Return true so that we can invalidate ITLB too.
		 */
		return true;
	}

	exec = l1_table[l1_pos] & XTENSA_MMU_PERM_X;

	l2_table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);

	sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

	l2_table[l2_pos] = XTENSA_MMU_PTE_ILLEGAL;

	sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

	/* If any other entry in this L2 table is still valid, keep the
	 * table; otherwise fall through and free it.
	 */
	for (l2_pos = 0; l2_pos < XTENSA_L2_PAGE_TABLE_ENTRIES; l2_pos++) {
		if (!is_pte_illegal(l2_table[l2_pos])) {
			goto end;
		}
	}

	l1_table[l1_pos] = XTENSA_MMU_PTE_ILLEGAL;
	sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));

	/* Return the now-empty L2 table to the allocation pool. */
	table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
	atomic_clear_bit(l2_page_tables_track, table_pos);

end:
	/* Need to invalidate L2 page table as it is no longer valid. */
	xtensa_tlb_autorefill_invalidate();
	return exec;
}
584
/**
 * @brief Unmap one page from the kernel tables and all domain tables.
 *
 * With CONFIG_XTENSA_MMU_DOUBLE_MAP both the cached and uncached
 * aliases of the address are unmapped; vaddr_uc is only assigned (and
 * only used) in that configuration.
 *
 * @param va Virtual address of the page to unmap.
 */
static inline void __arch_mem_unmap(void *va)
{
	bool is_exec;
	void *vaddr, *vaddr_uc;

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
		if (sys_cache_is_ptr_cached(va)) {
			vaddr = va;
			vaddr_uc = sys_cache_uncached_ptr_get(va);
		} else {
			vaddr = sys_cache_cached_ptr_get(va);
			vaddr_uc = va;
		}
	} else {
		vaddr = va;
	}

	/* is_exec is computed but not consumed here; ITLB handling is
	 * left to the TLB invalidation done in l2_page_table_unmap().
	 */
	is_exec = l2_page_table_unmap(xtensa_kernel_ptables, (void *)vaddr);

	if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
		(void)l2_page_table_unmap(xtensa_kernel_ptables, (void *)vaddr_uc);
	}

#ifdef CONFIG_USERSPACE
	sys_snode_t *node;
	struct arch_mem_domain *domain;
	k_spinlock_key_t key;

	/* Remove the mapping from every initialized memory domain too. */
	key = k_spin_lock(&z_mem_domain_lock);
	SYS_SLIST_FOR_EACH_NODE(&xtensa_domain_list, node) {
		domain = CONTAINER_OF(node, struct arch_mem_domain, node);

		(void)l2_page_table_unmap(domain->ptables, (void *)vaddr);

		if (IS_ENABLED(CONFIG_XTENSA_MMU_DOUBLE_MAP)) {
			(void)l2_page_table_unmap(domain->ptables, (void *)vaddr_uc);
		}
	}
	k_spin_unlock(&z_mem_domain_lock, key);
#endif /* CONFIG_USERSPACE */
}
626
/**
 * @brief Unmap a region of virtual memory (arch interface).
 *
 * Unmaps page by page under the MMU lock, then notifies other CPUs and
 * flushes the data cache. NULL address or zero size is rejected with a
 * log message (no panic).
 *
 * @param addr Virtual start address.
 * @param size Region size in bytes.
 */
void arch_mem_unmap(void *addr, size_t size)
{
	uint32_t va = (uint32_t)addr;
	uint32_t rem_size = (uint32_t)size;
	k_spinlock_key_t key;

	if (addr == NULL) {
		LOG_ERR("Cannot unmap NULL pointer");
		return;
	}

	if (size == 0) {
		LOG_ERR("Cannot unmap virtual memory with zero size");
		return;
	}

	key = k_spin_lock(&xtensa_mmu_lock);

	/* 4 kB steps; page size is pinned to 4 kB by the BUILD_ASSERT. */
	while (rem_size > 0) {
		__arch_mem_unmap((void *)va);

		rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size;
		va += KB(4);
	}

#if CONFIG_MP_MAX_NUM_CPUS > 1
	/* Tell the other CPUs to refresh their TLBs. */
	xtensa_mmu_tlb_ipi();
#endif

	sys_cache_data_flush_and_invd_all();
	k_spin_unlock(&xtensa_mmu_lock, key);
}
659
/* This should be implemented in the SoC layer.
 * This weak version is here to avoid build errors.
 */
void __weak xtensa_mmu_tlb_ipi(void)
{
	/* Intentionally empty: SoC code overrides this to signal other
	 * CPUs that they must refresh their TLBs.
	 */
}
666
/**
 * @brief Refresh this CPU's view of the page tables after remote changes.
 *
 * Invalidates cached copies of all L1/L2 page tables and the autorefill
 * TLB entries, and — for user threads — switches to the thread's page
 * tables if they differ from the ones currently active on this CPU.
 * Runs with interrupts locked to avoid a context switch on stale tables.
 */
void xtensa_mmu_tlb_shootdown(void)
{
	unsigned int key;

	/* Need to lock interrupts to prevent any context
	 * switching until all the page tables are updated.
	 * Or else we would be switching to another thread
	 * and running that with incorrect page tables
	 * which would result in permission issues.
	 */
	key = arch_irq_lock();

	K_SPINLOCK(&xtensa_mmu_lock) {
		/* We don't have information on which page tables have changed,
		 * so we just invalidate the cache for all L1 page tables.
		 */
		sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table));
		sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
	}

#ifdef CONFIG_USERSPACE
	struct k_thread *thread = _current_cpu->current;

	/* If current thread is a user thread, we need to see if it has
	 * been migrated to another memory domain as the L1 page table
	 * is different from the currently used one.
	 */
	if ((thread->base.user_options & K_USER) == K_USER) {
		uint32_t ptevaddr_entry, ptevaddr,
			 thread_ptables, current_ptables;

		/* Need to read the currently used L1 page table.
		 * We know that L1 page table is always mapped at way
		 * MMU_PTE_WAY, so we can skip the probing step by
		 * generating the query entry directly.
		 */
		ptevaddr = (uint32_t)xtensa_ptevaddr_get();
		ptevaddr_entry = XTENSA_MMU_PTE_ENTRY_VADDR(ptevaddr, ptevaddr)
				 | XTENSA_MMU_PTE_WAY;
		current_ptables = xtensa_dtlb_paddr_read(ptevaddr_entry);
		thread_ptables = (uint32_t)thread->arch.ptables;

		if (thread_ptables != current_ptables) {
			/* Need to remap the thread page tables if the ones
			 * indicated by the current thread are different
			 * than the current mapped page table.
			 */
			struct arch_mem_domain *domain =
				&(thread->mem_domain_info.mem_domain->arch);
			xtensa_set_paging(domain->asid, (uint32_t *)thread_ptables);
		}

	}
#endif /* CONFIG_USERSPACE */

	/* L2 are done via autofill, so invalidate autofill TLBs
	 * would refresh the L2 page tables.
	 *
	 * L1 will be refreshed during context switch so no need
	 * to do anything here.
	 */
	xtensa_tlb_autorefill_invalidate();

	arch_irq_unlock(key);
}
732
733 #ifdef CONFIG_USERSPACE
734
alloc_l1_table(void)735 static inline uint32_t *alloc_l1_table(void)
736 {
737 uint16_t idx;
738
739 for (idx = 0; idx < CONFIG_XTENSA_MMU_NUM_L1_TABLES; idx++) {
740 if (!atomic_test_and_set_bit(l1_page_table_track, idx)) {
741 return (uint32_t *)&l1_page_table[idx];
742 }
743 }
744
745 return NULL;
746 }
747
dup_table(void)748 static uint32_t *dup_table(void)
749 {
750 uint16_t i, j;
751 uint32_t *dst_table = alloc_l1_table();
752
753 if (!dst_table) {
754 return NULL;
755 }
756
757 for (i = 0; i < XTENSA_L1_PAGE_TABLE_ENTRIES; i++) {
758 uint32_t *l2_table, *src_l2_table;
759
760 if (is_pte_illegal(xtensa_kernel_ptables[i]) ||
761 (i == XTENSA_MMU_L1_POS(XTENSA_MMU_PTEVADDR))) {
762 dst_table[i] = XTENSA_MMU_PTE_ILLEGAL;
763 continue;
764 }
765
766 src_l2_table = (uint32_t *)(xtensa_kernel_ptables[i] & XTENSA_MMU_PTE_PPN_MASK);
767 l2_table = alloc_l2_table();
768 if (l2_table == NULL) {
769 goto err;
770 }
771
772 for (j = 0; j < XTENSA_L2_PAGE_TABLE_ENTRIES; j++) {
773 uint32_t original_attr = XTENSA_MMU_PTE_SW_GET(src_l2_table[j]);
774
775 l2_table[j] = src_l2_table[j];
776 if (original_attr != 0x0) {
777 uint8_t ring;
778
779 ring = XTENSA_MMU_PTE_RING_GET(l2_table[j]);
780 l2_table[j] = XTENSA_MMU_PTE_ATTR_SET(l2_table[j], original_attr);
781 l2_table[j] = XTENSA_MMU_PTE_RING_SET(l2_table[j],
782 ring == XTENSA_MMU_SHARED_RING ?
783 XTENSA_MMU_SHARED_RING : XTENSA_MMU_KERNEL_RING);
784 }
785 }
786
787 /* The page table is using kernel ASID because we don't
788 * user thread manipulate it.
789 */
790 dst_table[i] = XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING,
791 0, XTENSA_MMU_PAGE_TABLE_ATTR);
792
793 sys_cache_data_flush_range((void *)l2_table, XTENSA_L2_PAGE_TABLE_SIZE);
794 }
795
796 sys_cache_data_flush_range((void *)dst_table, XTENSA_L1_PAGE_TABLE_SIZE);
797
798 return dst_table;
799
800 err:
801 /* TODO: Cleanup failed allocation*/
802 return NULL;
803 }
804
/**
 * @brief Initialize the architecture part of a memory domain.
 *
 * The default domain shares the kernel page tables; any other domain
 * gets a duplicated page table set, a newly assigned ASID, and is added
 * to xtensa_domain_list.
 *
 * @param domain Memory domain to initialize.
 *
 * @return 0 on success, -ENOMEM if page table duplication failed.
 */
int arch_mem_domain_init(struct k_mem_domain *domain)
{
	uint32_t *ptables;
	k_spinlock_key_t key;
	int ret;

	/* Assert if we have run out of ASIDs to hand out. */
	__ASSERT(asid_count < (XTENSA_MMU_SHARED_ASID), "Reached maximum of ASID available");

	key = k_spin_lock(&xtensa_mmu_lock);
	/* If this is the default domain, we don't need
	 * to create a new set of page tables. We can just
	 * use the kernel page tables and save memory.
	 */

	if (domain == &k_mem_domain_default) {
		domain->arch.ptables = xtensa_kernel_ptables;
		domain->arch.asid = asid_count;
		goto end;
	}


	ptables = dup_table();

	if (ptables == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	domain->arch.ptables = ptables;
	domain->arch.asid = ++asid_count;

	sys_slist_append(&xtensa_domain_list, &domain->arch.node);

end:
	/* Success path: falls through to the unlock below. */
	ret = 0;

err:
	k_spin_unlock(&xtensa_mmu_lock, key);

	return ret;
}
850
/**
 * @brief Rewrite ring and attributes of an already-mapped region.
 *
 * For each page in [start, start + size), updates the existing PTE's
 * ring and attribute fields in place and invalidates its DTLB entry.
 *
 * NOTE(review): the L1 entry is dereferenced without an is_pte_illegal()
 * check — assumes callers only pass regions that are fully mapped;
 * confirm against callers.
 *
 * @return Always 0.
 */
static int region_map_update(uint32_t *ptables, uintptr_t start,
			     size_t size, uint32_t ring, uint32_t flags)
{
	int ret = 0;

	for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
		uint32_t *l2_table, pte;
		uint32_t page = start + offset;
		uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
		uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
		/* Make sure we grab a fresh copy of L1 page table */
		sys_cache_data_invd_range((void *)&ptables[l1_pos], sizeof(ptables[0]));

		l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);

		sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

		pte = XTENSA_MMU_PTE_RING_SET(l2_table[l2_pos], ring);
		pte = XTENSA_MMU_PTE_ATTR_SET(pte, flags);

		l2_table[l2_pos] = pte;

		sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));

		xtensa_dtlb_vaddr_invalidate((void *)page);
	}

	return ret;
}
880
/**
 * @brief Update permissions on a region, handling double mapping.
 *
 * Takes the MMU lock, updates both the cached and uncached aliases when
 * CONFIG_XTENSA_MMU_DOUBLE_MAP is enabled, then (unless the caller
 * passed OPTION_NO_TLB_IPI) notifies other CPUs and flushes the cache.
 *
 * @param ptables L1 page table set to update.
 * @param start Region start address.
 * @param size Region size in bytes.
 * @param ring New ring for the PTEs.
 * @param flags New attribute flags for the PTEs.
 * @param option OPTION_NO_TLB_IPI to suppress the cross-CPU TLB IPI.
 *
 * @return 0 on success.
 */
static inline int update_region(uint32_t *ptables, uintptr_t start,
				size_t size, uint32_t ring, uint32_t flags,
				uint32_t option)
{
	int ret;
	k_spinlock_key_t key;

	key = k_spin_lock(&xtensa_mmu_lock);

#ifdef CONFIG_XTENSA_MMU_DOUBLE_MAP
	uintptr_t va, va_uc;
	uint32_t new_flags, new_flags_uc;

	if (sys_cache_is_ptr_cached((void *)start)) {
		va = start;
		va_uc = (uintptr_t)sys_cache_uncached_ptr_get((void *)start);
	} else {
		va = (uintptr_t)sys_cache_cached_ptr_get((void *)start);
		va_uc = start;
	}

	/* Cached alias keeps write-back caching; uncached alias gets the
	 * cache attribute bits cleared.
	 */
	new_flags_uc = (flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
	new_flags = new_flags_uc | XTENSA_MMU_CACHED_WB;

	ret = region_map_update(ptables, va, size, ring, new_flags);

	if (ret == 0) {
		ret = region_map_update(ptables, va_uc, size, ring, new_flags_uc);
	}
#else
	ret = region_map_update(ptables, start, size, ring, flags);
#endif /* CONFIG_XTENSA_MMU_DOUBLE_MAP */

#if CONFIG_MP_MAX_NUM_CPUS > 1
	if ((option & OPTION_NO_TLB_IPI) != OPTION_NO_TLB_IPI) {
		xtensa_mmu_tlb_ipi();
	}
#endif

	sys_cache_data_flush_and_invd_all();
	k_spin_unlock(&xtensa_mmu_lock, key);

	return ret;
}
925
/* Restore a region to the default kernel-only access: kernel ring,
 * writable, no user permissions.
 */
static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t option)
{
	return update_region(ptables, start, size,
			     XTENSA_MMU_KERNEL_RING, XTENSA_MMU_PERM_W, option);
}
931
/* Prepare a user thread's stack: fill it (0xAA poison when
 * CONFIG_INIT_STACKS, zeros otherwise) and remap it user-ring writable
 * and cached in the thread's page tables.
 */
void xtensa_user_stack_perms(struct k_thread *thread)
{
	(void)memset((void *)thread->stack_info.start,
		     (IS_ENABLED(CONFIG_INIT_STACKS)) ? 0xAA : 0x00,
		     thread->stack_info.size - thread->stack_info.delta);

	update_region(thread_page_tables_get(thread),
		      thread->stack_info.start, thread->stack_info.size,
		      XTENSA_MMU_USER_RING, XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB, 0);
}
942
/* Report the configured maximum number of partitions per memory domain. */
int arch_mem_domain_max_partitions_get(void)
{
	return CONFIG_MAX_DOMAIN_PARTITIONS;
}
947
/**
 * @brief Remove a partition from a memory domain.
 *
 * @param domain Domain the partition belongs to.
 * @param partition_id Index of the partition inside @a domain.
 *
 * @return 0 on success (see reset_region()).
 */
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id)
{
	struct k_mem_partition *partition = &domain->partitions[partition_id];

	/* Reset the partition's region back to defaults */
	return reset_region(domain->arch.ptables, partition->start,
			    partition->size, 0);
}
957
/**
 * @brief Add a partition to a memory domain.
 *
 * Applies the partition's attributes to its region in the domain's page
 * tables, using the user ring for user-accessible partitions.
 *
 * @param domain Domain the partition belongs to.
 * @param partition_id Index of the partition inside @a domain.
 *
 * @return 0 on success (see update_region()).
 */
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id)
{
	struct k_mem_partition *partition = &domain->partitions[partition_id];
	uint32_t ring = K_MEM_PARTITION_IS_USER(partition->attr) ? XTENSA_MMU_USER_RING :
		XTENSA_MMU_KERNEL_RING;

	return update_region(domain->arch.ptables, partition->start,
			     partition->size, ring, partition->attr, 0);
}
968
/**
 * @brief Add a thread to a memory domain.
 *
 * Points the thread at the domain's page tables. If the thread is a
 * user thread migrating from another domain, its stack is made
 * accessible in the new tables and reset to kernel-only in the old
 * ones. Switches paging immediately when the thread is currently
 * running on this CPU.
 *
 * @param thread Thread being added to its (already set) memory domain.
 *
 * @return 0 on success.
 */
int arch_mem_domain_thread_add(struct k_thread *thread)
{
	int ret = 0;
	bool is_user, is_migration;
	uint32_t *old_ptables;
	struct k_mem_domain *domain;

	old_ptables = thread->arch.ptables;
	domain = thread->mem_domain_info.mem_domain;
	thread->arch.ptables = domain->arch.ptables;

	is_user = (thread->base.user_options & K_USER) != 0;
	is_migration = (old_ptables != NULL) && is_user;

	if (is_migration) {
		/* Give access to the thread's stack in its new
		 * memory domain if it is migrating.
		 */
		update_region(thread_page_tables_get(thread),
			      thread->stack_info.start, thread->stack_info.size,
			      XTENSA_MMU_USER_RING,
			      XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
			      OPTION_NO_TLB_IPI);
		/* and reset thread's stack permission in
		 * the old page tables.
		 */
		ret = reset_region(old_ptables,
				   thread->stack_info.start,
				   thread->stack_info.size, 0);
	}

	/* Need to switch to new page tables if this is
	 * the current thread running.
	 */
	if (thread == _current_cpu->current) {
		xtensa_set_paging(domain->arch.asid, thread->arch.ptables);
	}

#if CONFIG_MP_MAX_NUM_CPUS > 1
	/* Need to tell other CPUs to switch to the new page table
	 * in case the thread is running on one of them.
	 *
	 * Note that there is no need to send TLB IPI if this is
	 * migration as it was sent above during reset_region().
	 */
	if ((thread != _current_cpu->current) && !is_migration) {
		xtensa_mmu_tlb_ipi();
	}
#endif

	return ret;
}
1022
/**
 * @brief Remove a thread from its memory domain.
 *
 * Only acts for user threads that are exiting for good; migrating
 * threads are handled entirely by the subsequent thread_add() call.
 *
 * @param thread Thread being removed from its memory domain.
 *
 * @return 0 when nothing needed doing, otherwise the reset_region()
 *         result.
 */
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
	struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;

	if ((thread->base.user_options & K_USER) == 0) {
		return 0;
	}

	if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
		/* Thread is migrating to another memory domain and not
		 * exiting for good; we weren't called from
		 * z_thread_abort().  Resetting the stack region will
		 * take place in the forthcoming thread_add() call.
		 */
		return 0;
	}

	/* Restore permissions on the thread's stack area since it is no
	 * longer a member of the domain.
	 *
	 * Note that, since every thread must have an associated memory
	 * domain, removing a thread from domain will be followed by
	 * adding it back to another. So there is no need to send TLB IPI
	 * at this point.
	 */
	return reset_region(domain->arch.ptables,
			    thread->stack_info.start,
			    thread->stack_info.size, OPTION_NO_TLB_IPI);
}
1052
page_validate(uint32_t * ptables,uint32_t page,uint8_t ring,bool write)1053 static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool write)
1054 {
1055 uint8_t asid_ring;
1056 uint32_t rasid, pte, *l2_table;
1057 uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
1058 uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
1059
1060 if (is_pte_illegal(ptables[l1_pos])) {
1061 return false;
1062 }
1063
1064 l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
1065 pte = l2_table[l2_pos];
1066
1067 if (is_pte_illegal(pte)) {
1068 return false;
1069 }
1070
1071 asid_ring = 0;
1072 rasid = xtensa_rasid_get();
1073 for (uint32_t i = 0; i < 4; i++) {
1074 if (XTENSA_MMU_PTE_ASID_GET(pte, rasid) == XTENSA_MMU_RASID_ASID_GET(rasid, i)) {
1075 asid_ring = i;
1076 break;
1077 }
1078 }
1079
1080 if (ring > asid_ring) {
1081 return false;
1082 }
1083
1084 if (write) {
1085 return (XTENSA_MMU_PTE_ATTR_GET((pte)) & XTENSA_MMU_PERM_W) != 0;
1086 }
1087
1088 return true;
1089 }
1090
mem_buffer_validate(const void * addr,size_t size,int write,int ring)1091 static int mem_buffer_validate(const void *addr, size_t size, int write, int ring)
1092 {
1093 int ret = 0;
1094 uint8_t *virt;
1095 size_t aligned_size;
1096 const struct k_thread *thread = _current;
1097 uint32_t *ptables = thread_page_tables_get(thread);
1098
1099 /* addr/size arbitrary, fix this up into an aligned region */
1100 k_mem_region_align((uintptr_t *)&virt, &aligned_size,
1101 (uintptr_t)addr, size, CONFIG_MMU_PAGE_SIZE);
1102
1103 for (size_t offset = 0; offset < aligned_size;
1104 offset += CONFIG_MMU_PAGE_SIZE) {
1105 if (!page_validate(ptables, (uint32_t)(virt + offset), ring, write)) {
1106 ret = -1;
1107 break;
1108 }
1109 }
1110
1111 return ret;
1112 }
1113
xtensa_mem_kernel_has_access(void * addr,size_t size,int write)1114 bool xtensa_mem_kernel_has_access(void *addr, size_t size, int write)
1115 {
1116 return mem_buffer_validate(addr, size, write, XTENSA_MMU_KERNEL_RING) == 0;
1117 }
1118
arch_buffer_validate(const void * addr,size_t size,int write)1119 int arch_buffer_validate(const void *addr, size_t size, int write)
1120 {
1121 return mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING);
1122 }
1123
xtensa_swap_update_page_tables(struct k_thread * incoming)1124 void xtensa_swap_update_page_tables(struct k_thread *incoming)
1125 {
1126 uint32_t *ptables = incoming->arch.ptables;
1127 struct arch_mem_domain *domain =
1128 &(incoming->mem_domain_info.mem_domain->arch);
1129
1130 xtensa_set_paging(domain->asid, ptables);
1131
1132 #ifdef CONFIG_XTENSA_INVALIDATE_MEM_DOMAIN_TLB_ON_SWAP
1133 struct k_mem_domain *mem_domain = incoming->mem_domain_info.mem_domain;
1134
1135 for (int idx = 0; idx < mem_domain->num_partitions; idx++) {
1136 struct k_mem_partition *part = &mem_domain->partitions[idx];
1137 uintptr_t end = part->start + part->size;
1138
1139 for (uintptr_t addr = part->start; addr < end; addr += CONFIG_MMU_PAGE_SIZE) {
1140 xtensa_dtlb_vaddr_invalidate((void *)addr);
1141 }
1142 }
1143 #endif
1144 }
1145
1146 #endif /* CONFIG_USERSPACE */
1147