Lines Matching +full:permission +full:- +full:flags

3  * SPDX-License-Identifier: Apache-2.0
9 #include <zephyr/linker/linker-defs.h>
97 * compared with the current ring (CRING) to check the permission.
118 * cacheable, read / write and non-executable
151 /* Mark rodata segment cacheable, read only and non-executable */
163 if ((thread->base.user_options & K_USER) != 0U) { in thread_page_tables_get()
164 return thread->arch.ptables; in thread_page_tables_get()
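
The two lines above (163-164) are the user-thread branch of thread_page_tables_get(); supervisor threads presumably fall back to the shared kernel tables. A minimal sketch of that shape, assuming the xtensa_kernel_ptables symbol that appears later in this listing (line 817):

static inline uint32_t *thread_page_tables_get(struct k_thread *thread)
{
	if ((thread->base.user_options & K_USER) != 0U) {
		return thread->arch.ptables;
	}

	return xtensa_kernel_ptables; /* assumed: shared kernel L1 table */
}
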
283 map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL); in xtensa_init_page_tables()
289 map_memory(range->start, range->end, range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL); in xtensa_init_page_tables()
292 /* Finally, the direct-mapped pages used in the page tables in xtensa_init_page_tables()
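
In lines 283-292, map_memory() is applied to entries of a fixed range table during xtensa_init_page_tables(), with XTENSA_MMU_PTE_ATTR_ORIGINAL OR'd in so these boot-time mappings can later be told apart from dynamic ones. A sketch of that range-table pattern; the struct layout is inferred from the range->start/end/attrs accesses, and the table entry is illustrative, not a real address:

struct mmu_range {
	uintptr_t start;
	uintptr_t end;
	uint32_t attrs;
};

static const struct mmu_range boot_ranges[] = {
	/* Illustrative entry; the real table is built from linker
	 * symbols (see the linker-defs.h include at line 9).
	 */
	{ .start = 0x60000000, .end = 0x60100000,
	  .attrs = 0 /* e.g. cached, read-only; encoding omitted */ },
};

static void map_boot_ranges(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(boot_ranges); i++) {
		const struct mmu_range *range = &boot_ranges[i];

		map_memory(range->start, range->end,
			   range->attrs | XTENSA_MMU_PTE_ATTR_ORIGINAL);
	}
}
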
325 arch_xtensa_mmu_post_init(_current_cpu->id == 0); in xtensa_mmu_init()
334 struct k_thread *thread = _current_cpu->current; in xtensa_mmu_reinit()
336 &(thread->mem_domain_info.mem_domain->arch); in xtensa_mmu_reinit()
340 xtensa_set_paging(domain->asid, domain->ptables); in xtensa_mmu_reinit()
343 arch_xtensa_mmu_post_init(_current_cpu->id == 0); in xtensa_mmu_reinit()
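
Lines 334-343 reconstruct almost all of xtensa_mmu_reinit(): re-install the current thread's domain paging (ASID plus page-table root), then re-run the per-core post-init hook, where only core 0 performs the one-time setup. A sketch assembled from exactly those lines:

void xtensa_mmu_reinit(void)
{
	struct k_thread *thread = _current_cpu->current;
	struct arch_mem_domain *domain =
		&(thread->mem_domain_info.mem_domain->arch);

	/* Reload the ASID/page-table pair for the active domain. */
	xtensa_set_paging(domain->asid, domain->ptables);

	/* Per-core hook; the argument flags whether this is core 0. */
	arch_xtensa_mmu_post_init(_current_cpu->id == 0);
}
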
367 uint32_t flags, bool is_user) in l2_page_table_map() argument
393 0, flags); in l2_page_table_map()
406 uint32_t flags, flags_uc; in __arch_mem_map() local
426 flags = flags_uc | XTENSA_MMU_CACHED_WB; in __arch_mem_map()
430 flags = xtensa_flags; in __arch_mem_map()
434 flags, is_user); in __arch_mem_map()
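
Lines 406-434 hint at the double-mapping flag derivation in __arch_mem_map(): the uncached attribute set (flags_uc) is the caller's flags with every cacheability bit masked off, and the cached alias adds write-back caching on top (line 426). A standalone demo of that bit manipulation; the mask values are placeholders, not the real XTENSA_MMU_* encodings:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values; the real XTENSA_MMU_* encodings differ. */
#define PTE_ATTR_CACHED_MASK 0x0000000Cu
#define PTE_CACHED_WB        0x00000004u

int main(void)
{
	uint32_t xtensa_flags = 0x0000000Eu; /* e.g. RW + some cache bits */

	/* Uncached alias: identical permissions, cache bits stripped. */
	uint32_t flags_uc = xtensa_flags & ~PTE_ATTR_CACHED_MASK;

	/* Cached alias: same permissions, write-back cacheable. */
	uint32_t flags = flags_uc | PTE_CACHED_WB;

	printf("uncached=0x%08x cached=0x%08x\n", flags_uc, flags);
	return 0;
}
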
455 ret = l2_page_table_map(domain->ptables, (void *)vaddr, paddr, in __arch_mem_map()
456 flags, is_user); in __arch_mem_map()
461 ret = l2_page_table_map(domain->ptables, in __arch_mem_map()
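
Lines 455-461 show l2_page_table_map() being called per memory domain, apparently once for the cached virtual alias and once for the uncached one. A fragment sketching that loop; the list iteration over xtensa_domain_list (line 833) and the paddr_uc name are assumptions, not visible in the listing:

struct arch_mem_domain *domain;

SYS_SLIST_FOR_EACH_CONTAINER(&xtensa_domain_list, domain, node) {
	/* Cached alias of the mapping... */
	ret = l2_page_table_map(domain->ptables, (void *)vaddr, paddr,
				flags, is_user);

	/* ...and the uncached alias of the same physical page
	 * (vaddr_uc appears at line 613; paddr_uc is assumed).
	 */
	if (ret == 0) {
		ret = l2_page_table_map(domain->ptables, (void *)vaddr_uc,
					paddr_uc, flags_uc, is_user);
	}
}
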
473 void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags) in arch_mem_map() argument
488 switch (flags & K_MEM_CACHE_MASK) { in arch_mem_map()
502 if ((flags & K_MEM_PERM_RW) == K_MEM_PERM_RW) { in arch_mem_map()
505 if ((flags & K_MEM_PERM_EXEC) == K_MEM_PERM_EXEC) { in arch_mem_map()
509 is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER; in arch_mem_map()
516 rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size; in arch_mem_map()
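
Lines 473-516 outline arch_mem_map(): translate the generic K_MEM_CACHE_* and K_MEM_PERM_* flags into xtensa PTE attributes, then walk the range one 4 KB page at a time, as the rem_size clamp at line 516 shows. A self-contained sketch of the page-walking loop; mem_map_range() and map_one_page() are stand-in names, not Zephyr APIs:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u /* the KB(4) stride in the listing */

/* Stand-in for the per-page worker (__arch_mem_map() in the source). */
static void map_one_page(uintptr_t va, uintptr_t pa, uint32_t flags,
			 bool is_user)
{
	/* A real implementation would write the PTE for this page. */
	(void)va; (void)pa; (void)flags; (void)is_user;
}

static void mem_map_range(uintptr_t virt, uintptr_t phys, size_t size,
			  uint32_t flags, bool is_user)
{
	size_t rem_size = size;

	while (rem_size > 0) {
		map_one_page(virt, phys, flags, is_user);

		virt += PAGE_SIZE;
		phys += PAGE_SIZE;

		/* The final chunk may be shorter than a page; clamp the
		 * decrement exactly as the listing does.
		 */
		rem_size -= (rem_size >= PAGE_SIZE) ? PAGE_SIZE : rem_size;
	}
}
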
569 table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES); in l2_page_table_unmap()
610 (void)l2_page_table_unmap(domain->ptables, (void *)vaddr); in __arch_mem_unmap()
613 (void)l2_page_table_unmap(domain->ptables, (void *)vaddr_uc); in __arch_mem_unmap()
641 rem_size -= (rem_size >= KB(4)) ? KB(4) : rem_size; in arch_mem_unmap()
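
Line 569 recovers which slot of a static L2 table pool a given table occupies via pointer arithmetic, presumably so l2_page_table_unmap() can mark that slot free once the table empties. A standalone demo of the same index computation; the pool dimensions are illustrative:

#include <stdint.h>
#include <stdio.h>

#define L2_ENTRIES    1024u /* illustrative entries per L2 table */
#define NUM_L2_TABLES 8u    /* illustrative pool size */

static uint32_t l2_page_tables[NUM_L2_TABLES][L2_ENTRIES];

/* Slot index of an L2 table inside the pool, derived the same way as
 * the table_pos computation at line 569.
 */
static inline uint32_t l2_table_pos(const uint32_t *l2_table)
{
	return (uint32_t)((l2_table - (const uint32_t *)l2_page_tables) /
			  L2_ENTRIES);
}

int main(void)
{
	printf("%u\n", l2_table_pos(l2_page_tables[3])); /* prints 3 */
	return 0;
}
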
668 * which would result in permission issues. in xtensa_mmu_tlb_shootdown()
681 struct k_thread *thread = _current_cpu->current; in xtensa_mmu_tlb_shootdown()
687 if ((thread->base.user_options & K_USER) == K_USER) { in xtensa_mmu_tlb_shootdown()
700 thread_ptables = (uint32_t)thread->arch.ptables; in xtensa_mmu_tlb_shootdown()
708 &(thread->mem_domain_info.mem_domain->arch); in xtensa_mmu_tlb_shootdown()
709 xtensa_set_paging(domain->asid, (uint32_t *)thread_ptables); in xtensa_mmu_tlb_shootdown()
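
Lines 668-709 sketch the TLB shootdown handler: if the interrupted thread is a user thread, compare the page tables it should be using against what the hardware has installed, and reload paging if they differ, avoiding the permission issues mentioned at line 668. A fragment under those assumptions; current_hw_ptables() is a placeholder for however the real code reads back the active table root:

struct k_thread *thread = _current_cpu->current;

if ((thread->base.user_options & K_USER) == K_USER) {
	uint32_t thread_ptables = (uint32_t)thread->arch.ptables;

	/* Placeholder check: the comparison between lines 700 and 708
	 * is not visible in this listing.
	 */
	if (thread_ptables != current_hw_ptables()) {
		struct arch_mem_domain *domain =
			&(thread->mem_domain_info.mem_domain->arch);

		xtensa_set_paging(domain->asid, (uint32_t *)thread_ptables);
	}
}
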
817 domain->arch.ptables = xtensa_kernel_ptables; in arch_mem_domain_init()
818 domain->arch.asid = asid_count; in arch_mem_domain_init()
826 ret = -ENOMEM; in arch_mem_domain_init()
830 domain->arch.ptables = ptables; in arch_mem_domain_init()
831 domain->arch.asid = ++asid_count; in arch_mem_domain_init()
833 sys_slist_append(&xtensa_domain_list, &domain->arch.node); in arch_mem_domain_init()
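
Lines 817-833 show the two outcomes of arch_mem_domain_init(): the domain either keeps the kernel tables with the current ASID, or gets a private copy of the tables, a freshly incremented ASID, and a spot on the domain list; allocation failure yields -ENOMEM. A sketch connecting those outcomes; dup_table() is an assumed helper, since only its results appear in the listing:

static int domain_init_sketch(struct k_mem_domain *domain)
{
	int ret = 0;
	uint32_t *ptables;

	/* Default: share the kernel view of memory. */
	domain->arch.ptables = xtensa_kernel_ptables;
	domain->arch.asid = asid_count;

	/* Assumed helper that clones the kernel tables. */
	ptables = dup_table(xtensa_kernel_ptables);
	if (ptables == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	domain->arch.ptables = ptables;
	domain->arch.asid = ++asid_count;

	sys_slist_append(&xtensa_domain_list, &domain->arch.node);

out:
	return ret;
}
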
845 size_t size, uint32_t ring, uint32_t flags) in region_map_update() argument
862 pte = XTENSA_MMU_PTE_ATTR_SET(pte, flags); in region_map_update()
875 size_t size, uint32_t ring, uint32_t flags, in update_region() argument
895 new_flags_uc = (flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK); in update_region()
904 ret = region_map_update(ptables, start, size, ring, flags); in update_region()
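
Lines 845-904 show region_map_update() rewriting the ring and attribute bits of existing PTEs across a range (via XTENSA_MMU_PTE_ATTR_SET at line 862), with update_region() deriving the uncached attribute set for the double-mapped alias at line 895. A standalone sketch of that kind of PTE field rewrite; the bit layout here is illustrative only, not the real xtensa encoding:

#include <stdint.h>

/* Illustrative PTE layout: ring in bits [5:4], attributes in bits
 * [3:0]. The real xtensa encoding is different.
 */
#define PTE_ATTR_MASK  0x0000000Fu
#define PTE_RING_MASK  0x00000030u
#define PTE_RING_SHIFT 4u

static inline uint32_t pte_set_ring_attrs(uint32_t pte, uint32_t ring,
					  uint32_t attrs)
{
	pte &= ~(PTE_ATTR_MASK | PTE_RING_MASK);
	pte |= (ring << PTE_RING_SHIFT) & PTE_RING_MASK;
	pte |= attrs & PTE_ATTR_MASK;
	return pte;
}
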
927 (void)memset((void *)thread->stack_info.start, in xtensa_user_stack_perms()
929 thread->stack_info.size - thread->stack_info.delta); in xtensa_user_stack_perms()
932 thread->stack_info.start, thread->stack_info.size, in xtensa_user_stack_perms()
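
Lines 927-932 indicate that xtensa_user_stack_perms() wipes the usable stack area (the size minus the delta reserved at the top) and then opens the region to the user ring. A sketch under those assumptions; the fill byte and the exact attribute flags passed to update_region() are assumptions, as are any flags not visible in the listing:

void user_stack_perms_sketch(struct k_thread *thread)
{
	/* Wipe the usable stack; 0xaa is an assumed fill pattern. */
	(void)memset((void *)thread->stack_info.start, 0xaa,
		     thread->stack_info.size - thread->stack_info.delta);

	/* Grant the user ring read/write on the whole stack region;
	 * the flag combination is an assumption.
	 */
	(void)update_region(thread_page_tables_get(thread),
			    thread->stack_info.start,
			    thread->stack_info.size,
			    XTENSA_MMU_USER_RING,
			    XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB, 0);
}
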
944 struct k_mem_partition *partition = &domain->partitions[partition_id]; in arch_mem_domain_partition_remove()
947 return reset_region(domain->arch.ptables, partition->start, in arch_mem_domain_partition_remove()
948 partition->size, 0); in arch_mem_domain_partition_remove()
954 struct k_mem_partition *partition = &domain->partitions[partition_id]; in arch_mem_domain_partition_add()
955 uint32_t ring = K_MEM_PARTITION_IS_USER(partition->attr) ? XTENSA_MMU_USER_RING : in arch_mem_domain_partition_add()
958 return update_region(domain->arch.ptables, partition->start, in arch_mem_domain_partition_add()
959 partition->size, ring, partition->attr, 0); in arch_mem_domain_partition_add()
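
Lines 944-959 pair partition add and remove: adding picks the ring from the partition's attributes and applies it via update_region(); removing calls reset_region(), which by the call sites here (lines 947, 1041) takes only tables, range, and an option. Plausibly it is just the inverse of update_region(), returning the range to kernel-ring-only access. A sketch under that assumption; XTENSA_MMU_KERNEL_RING is the assumed counterpart of XTENSA_MMU_USER_RING:

static inline int reset_region(uint32_t *ptables, uintptr_t start,
			       size_t size, uint32_t option)
{
	return update_region(ptables, start, size,
			     XTENSA_MMU_KERNEL_RING, 0, option);
}
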
970 old_ptables = thread->arch.ptables; in arch_mem_domain_thread_add()
971 domain = thread->mem_domain_info.mem_domain; in arch_mem_domain_thread_add()
972 thread->arch.ptables = domain->arch.ptables; in arch_mem_domain_thread_add()
974 is_user = (thread->base.user_options & K_USER) != 0; in arch_mem_domain_thread_add()
982 thread->stack_info.start, thread->stack_info.size, in arch_mem_domain_thread_add()
986 /* and reset thread's stack permission in in arch_mem_domain_thread_add()
990 thread->stack_info.start, in arch_mem_domain_thread_add()
991 thread->stack_info.size, 0); in arch_mem_domain_thread_add()
997 if (thread == _current_cpu->current) { in arch_mem_domain_thread_add()
998 xtensa_set_paging(domain->arch.asid, thread->arch.ptables); in arch_mem_domain_thread_add()
1008 if ((thread != _current_cpu->current) && !is_migration) { in arch_mem_domain_thread_add()
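
Lines 970-1008 put arch_mem_domain_thread_add() together: remember the old tables, point the thread at its new domain's tables, grant the stack in the new tables, revoke it in the old ones when migrating between domains, reload paging if the thread is currently running, and (per line 1008) notify other cores otherwise. A condensed sketch; error handling is elided and the update_region() flag arguments are assumptions:

old_ptables = thread->arch.ptables;
domain = thread->mem_domain_info.mem_domain;
thread->arch.ptables = domain->arch.ptables;

is_user = (thread->base.user_options & K_USER) != 0;

if (is_user) {
	/* Stack becomes reachable through the new domain's tables... */
	ret = update_region(thread->arch.ptables,
			    thread->stack_info.start,
			    thread->stack_info.size,
			    XTENSA_MMU_USER_RING,
			    XTENSA_MMU_PERM_W, OPTION_NO_TLB_IPI);

	/* ...and is revoked in the old tables on migration, so the
	 * previous domain can no longer touch it.
	 */
	if (is_migration) {
		ret = reset_region(old_ptables,
				   thread->stack_info.start,
				   thread->stack_info.size, 0);
	}
}

/* A running thread must start using the new tables immediately. */
if (thread == _current_cpu->current) {
	xtensa_set_paging(domain->arch.asid, thread->arch.ptables);
}
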
1018 struct k_mem_domain *domain = thread->mem_domain_info.mem_domain; in arch_mem_domain_thread_remove()
1020 if ((thread->base.user_options & K_USER) == 0) { in arch_mem_domain_thread_remove()
1024 if ((thread->base.thread_state & _THREAD_DEAD) == 0) { in arch_mem_domain_thread_remove()
1041 return reset_region(domain->arch.ptables, in arch_mem_domain_thread_remove()
1042 thread->stack_info.start, in arch_mem_domain_thread_remove()
1043 thread->stack_info.size, OPTION_NO_TLB_IPI); in arch_mem_domain_thread_remove()
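
Lines 1018-1043 suggest arch_mem_domain_thread_remove() only has work to do for user threads that are already dead (a live thread picks up new tables the next time it swaps in); the dead thread's stack is then returned to kernel-only access, skipping the TLB IPI since the thread cannot run again. A sketch with the early returns inferred from the two guards:

if ((thread->base.user_options & K_USER) == 0) {
	return 0; /* supervisor thread: nothing was mapped for it */
}

if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
	return 0; /* still alive: handled when it next swaps in */
}

/* Dead user thread: hand its stack back to the kernel ring. */
return reset_region(domain->arch.ptables,
		    thread->stack_info.start,
		    thread->stack_info.size, OPTION_NO_TLB_IPI);
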
1099 ret = -1; in mem_buffer_validate()
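
Line 1099 is the failure path of mem_buffer_validate(), which presumably walks the buffer a page at a time and checks each PTE's ring (and, for write access, its write attribute). A sketch under that assumption; pte_lookup(), pte_ring(), and pte_writable() are placeholders for however the real code walks and decodes the tables:

static int mem_buffer_validate_sketch(const void *addr, size_t size,
				      int write)
{
	int ret = 0;
	uintptr_t virt = (uintptr_t)addr;
	uintptr_t end = virt + size;

	for (; virt < end; virt += CONFIG_MMU_PAGE_SIZE) {
		uint32_t pte;

		if (!pte_lookup(virt, &pte) ||
		    (pte_ring(pte) != XTENSA_MMU_USER_RING) ||
		    (write && !pte_writable(pte))) {
			ret = -1;
			break;
		}
	}

	return ret;
}
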
1119 uint32_t *ptables = incoming->arch.ptables; in xtensa_swap_update_page_tables()
1121 &(incoming->mem_domain_info.mem_domain->arch); in xtensa_swap_update_page_tables()
1123 xtensa_set_paging(domain->asid, ptables); in xtensa_swap_update_page_tables()
1126 struct k_mem_domain *mem_domain = incoming->mem_domain_info.mem_domain; in xtensa_swap_update_page_tables()
1128 for (int idx = 0; idx < mem_domain->num_partitions; idx++) { in xtensa_swap_update_page_tables()
1129 struct k_mem_partition *part = &mem_domain->partitions[idx]; in xtensa_swap_update_page_tables()
1130 uintptr_t end = part->start + part->size; in xtensa_swap_update_page_tables()
1132 for (uintptr_t addr = part->start; addr < end; addr += CONFIG_MMU_PAGE_SIZE) { in xtensa_swap_update_page_tables()
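
Lines 1119-1132 show the context-switch hook xtensa_swap_update_page_tables(): the common path installs the incoming thread's ASID and tables via xtensa_set_paging(), while another configuration additionally walks every partition of the incoming domain page by page. What is done to each page is not visible in the listing (likely TLB or cache maintenance), so touch_page() below is a placeholder:

struct k_mem_domain *mem_domain = incoming->mem_domain_info.mem_domain;

for (int idx = 0; idx < mem_domain->num_partitions; idx++) {
	struct k_mem_partition *part = &mem_domain->partitions[idx];
	uintptr_t end = part->start + part->size;

	for (uintptr_t addr = part->start; addr < end;
	     addr += CONFIG_MMU_PAGE_SIZE) {
		touch_page(addr); /* placeholder for the per-page work */
	}
}
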