Lines Matching +full:dcache +full:- +full:alignment
7 * SPDX-License-Identifier: Apache-2.0
22 #include <zephyr/linker/linker-defs.h>
63 unsigned int i = (pte - xlat_tables) / Ln_XLAT_NUM_ENTRIES; in table_index()
78 MMU_DEBUG("table [%d]%p: usage %#x -> %#x\n", i, table, prev_count, new_count); in table_usage()
101 table_usage(table, -ref_unit); in dec_table_ref()
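table_usage() above adjusts a per-table count of live entries and references, using negative deltas when mappings or references go away, and dec_table_ref() drops a whole reference unit at once. A minimal standalone sketch of that bookkeeping idea follows; the counter layout and names are hypothetical, not the Zephyr implementation.

    #include <stdint.h>
    #include <assert.h>

    #define ENTRIES_PER_TABLE 512

    /* Hypothetical per-table usage counter: number of live entries
     * (plus parent references) held by one translation table. */
    static uint16_t tbl_use[16];

    /* Adjust a table's usage count by a signed delta, in the spirit of
     * table_usage(); the count must never go negative. */
    static void tbl_usage(unsigned int idx, int delta)
    {
        int new_count = (int)tbl_use[idx] + delta;

        assert(new_count >= 0);
        tbl_use[idx] = (uint16_t)new_count;
    }

    int main(void)
    {
        tbl_usage(3, 1);    /* a PTE was installed in table 3 */
        tbl_usage(3, 1);    /* a second PTE */
        tbl_usage(3, -1);   /* one mapping removed again */
        return (tbl_use[3] == 1) ? 0 : 1;
    }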
175 bool aligned = (desc & PTE_PHYSADDR_MASK & (level_size - 1)) == 0; in is_desc_block_aligned()
200 MMU_DEBUG("---\n"); in debug_show_pte()
218 MMU_DEBUG("[paged-out] "); in debug_show_pte()
225 MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_RO) ? "-RO" : "-RW"); in debug_show_pte()
226 MMU_DEBUG((*pte & PTE_BLOCK_DESC_NS) ? "-NS" : "-S"); in debug_show_pte()
227 MMU_DEBUG((*pte & PTE_BLOCK_DESC_AP_ELx) ? "-ELx" : "-ELh"); in debug_show_pte()
228 MMU_DEBUG((*pte & PTE_BLOCK_DESC_PXN) ? "-PXN" : "-PX"); in debug_show_pte()
229 MMU_DEBUG((*pte & PTE_BLOCK_DESC_UXN) ? "-UXN" : "-UX"); in debug_show_pte()
230 MMU_DEBUG((*pte & PTE_SW_WRITABLE) ? "-WRITABLE" : ""); in debug_show_pte()
333 return -EBUSY; in set_mapping()
340 level_size -= (virt & (level_size - 1)); in set_mapping()
347 if ((size < level_size) || (virt & (level_size - 1)) || in set_mapping()
352 return -ENOMEM; in set_mapping()
368 size -= level_size; in set_mapping()
384 for ( ; size; virt += step, size -= step) { in del_mapping()
385 step = level_size - (virt & (level_size - 1)); in del_mapping()
411 table_usage(pte, -1); in del_mapping()
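The recurring expression level_size - (virt & (level_size - 1)) in set_mapping() and del_mapping() is the number of bytes from virt up to the next level_size boundary, so each loop iteration consumes at most one translation-level granule. A small standalone illustration, using a hypothetical 2 MiB block size rather than the real xlat level sizes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t level_size = 2ULL * 1024 * 1024;  /* assumed 2 MiB blocks */

        uint64_t virt = 0x201000;            /* deliberately unaligned start */
        uint64_t size = 5ULL * 1024 * 1024;  /* 5 MiB to map or unmap        */
        uint64_t step;

        /* The first step only reaches the next block boundary; later
         * steps cover whole blocks until the range is consumed. */
        for ( ; size; virt += step, size -= step) {
            step = level_size - (virt & (level_size - 1));
            if (step > size) {
                step = size;
            }
            printf("chunk at %#llx, %#llx bytes\n",
                   (unsigned long long)virt, (unsigned long long)step);
        }
        return 0;
    }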
433 * updated. Thus, entries may become non-global. in dup_table()
434 * To keep the invariants very simple, we force the non-global in dup_table()
462 for ( ; size; virt += step, size -= step) { in privatize_table()
463 step = level_size - (virt & (level_size - 1)); in privatize_table()
482 return -ENOMEM; in privatize_table()
519 ret = privatize_table(dst_pt->base_xlat_table, src_pt->base_xlat_table, in privatize_page_range()
545 table_usage(table, -free_count); in discard_table()
555 for ( ; size; virt += step, size -= step) { in globalize_table()
556 step = level_size - (virt & (level_size - 1)); in globalize_table()
575 table_usage(dst_table, -1); in globalize_table()
588 return -ENOMEM; in globalize_table()
609 table_usage(dst_table, -1); in globalize_table()
648 ret = globalize_table(dst_pt->base_xlat_table, src_pt->base_xlat_table, in globalize_page_range()
669 * +--------------------+ in get_region_desc()
701 /* Accesses to Device memory and non-cacheable memory are coherent in get_region_desc()
707 /* Map device memory as execute-never */ in get_region_desc()
730 /* non-Global bit */ in get_region_desc()
747 __ASSERT(((virt | phys | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0, in __add_map()
750 return set_mapping(ptables->base_xlat_table, virt, size, desc, may_overwrite); in __add_map()
771 __ASSERT(((virt | size) & (CONFIG_MMU_PAGE_SIZE - 1)) == 0, in remove_map()
775 del_mapping(ptables->base_xlat_table, virt, size, BASE_XLAT_LEVEL); in remove_map()
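The __ASSERT() checks in __add_map() and remove_map() use a compact trick: OR-ing the virtual address, physical address and size together and masking with CONFIG_MMU_PAGE_SIZE - 1 verifies that all of them are page aligned in a single test, which only works because the page size is a power of two. A self-contained version of the same check, with an assumed 4 KiB page size:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    #define PAGE_SIZE 4096u   /* assumed page size; must be a power of two */

    /* OR-ing the operands merges their low bits, so one mask test tells
     * whether every one of them is PAGE_SIZE aligned. */
    static bool all_page_aligned(uintptr_t virt, uintptr_t phys, size_t size)
    {
        return ((virt | phys | size) & (PAGE_SIZE - 1)) == 0;
    }

    int main(void)
    {
        assert(all_page_aligned(0x20000000, 0x80000000, 0x4000));
        assert(!all_page_aligned(0x20000800, 0x80000000, 0x4000));
        return 0;
    }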
804 * cacheable, read-write
805 * Note: read-write region is marked execute-never internally
818 /* Mark rodata segment cacheable, read-only and execute-never */
825 /* Mark nocache segment non-cacheable, read-write and execute-never */
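The comments above describe the kernel-image regions that get identity mapped at boot; each region carries a name, start/end bounds and attribute flags, which add_arm_mmu_flat_range() below passes to __add_map() with virt == phys. A hedged sketch of what such a range descriptor could look like; the field names follow the range->name/start/end/attrs accessors used below, but the struct layout and every flag except MT_NORMAL are placeholders, not Zephyr's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder attribute bits for illustration only. */
    #define MT_NORMAL      0u
    #define MT_RO          (1u << 4)
    #define MT_EXEC_NEVER  (1u << 5)

    struct flat_range {
        const char *name;   /* label used in debug output        */
        void *start;        /* first byte of the region          */
        void *end;          /* one byte past the end             */
        uint32_t attrs;     /* memory type and permission flags  */
    };

    static char fake_rodata[4096];   /* stand-in for a linker-defined segment */

    static const struct flat_range zephyr_like_ranges[] = {
        /* rodata: cacheable, read-only, never executable */
        {
            .name  = "zephyr_rodata",
            .start = fake_rodata,
            .end   = fake_rodata + sizeof(fake_rodata),
            .attrs = MT_NORMAL | MT_RO | MT_EXEC_NEVER,
        },
    };

    int main(void)
    {
        const struct flat_range *r = &zephyr_like_ranges[0];

        printf("%s: %zu bytes, attrs %#x\n", r->name,
               (size_t)((char *)r->end - (char *)r->start), r->attrs);
        return 0;
    }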
837 uintptr_t address = (uintptr_t)range->start; in add_arm_mmu_flat_range()
838 size_t size = (uintptr_t)range->end - address; in add_arm_mmu_flat_range()
842 __add_map(ptables, range->name, address, address, in add_arm_mmu_flat_range()
843 size, range->attrs | extra_flags); in add_arm_mmu_flat_range()
851 if (region->size || region->attrs) { in add_arm_mmu_region()
853 __add_map(ptables, region->name, region->base_pa, region->base_va, in add_arm_mmu_region()
854 region->size, region->attrs | extra_flags); in add_arm_mmu_region()
887 max_va = MAX(max_va, region->base_va + region->size); in setup_page_tables()
888 max_pa = MAX(max_pa, region->base_pa + region->size); in setup_page_tables()
917 size = POINTER_TO_UINT(range->end) - POINTER_TO_UINT(range->start); in setup_page_tables()
918 inv_dcache_after_map_helper(range->start, size, range->attrs); in setup_page_tables()
923 inv_dcache_after_map_helper(UINT_TO_POINTER(region->base_va), region->size, in setup_page_tables()
924 region->attrs); in setup_page_tables()
952 * inner shareable. Due to Cortex-A57 erratum #822227 we must in get_tcr()
969 write_ttbr0_el1((uint64_t)ptables->base_xlat_table); in enable_mmu_el1()
981 MMU_DEBUG("MMU enabled with dcache\n"); in enable_mmu_el1()
1034 domain_ptables = &domain->ptables; in sync_domains()
1053 /* Translate flags argument into HW-recognized entry flags. */ in __arch_mem_map()
1063 * (Normal memory Non-cacheable) in __arch_mem_map()
1090 return -ENOTSUP; in __arch_mem_map()
1156 return -EFAULT; in arch_page_phys_get()
1167 size_t alignment = CONFIG_MMU_PAGE_SIZE; in arch_virt_region_align() local
1171 for (level = XLAT_LAST_LEVEL; level >= BASE_XLAT_LEVEL; level--) { in arch_virt_region_align()
1178 if ((phys & (level_size - 1))) { in arch_virt_region_align()
1182 alignment = level_size; in arch_virt_region_align()
1185 return alignment; in arch_virt_region_align()
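arch_virt_region_align() walks the translation levels from the smallest block size upward and keeps the largest block size that the physical address is already aligned to, so the virtual region can later be placed at a matching alignment and mapped with large blocks instead of individual pages. A standalone illustration with hypothetical 4 KiB-granule block sizes (1 GiB / 2 MiB / 4 KiB):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed block sizes per level, largest first. */
        const uint64_t level_size[] = { 0x40000000ULL, 0x200000ULL, 0x1000ULL };

        uint64_t phys = 0x40200000ULL;   /* example physical base address */
        uint64_t align = 0x1000ULL;      /* never less than one page      */

        /* From the smallest block upward: stop at the first level whose
         * block size the address is not aligned to. */
        for (int level = 2; level >= 0; level--) {
            if (phys & (level_size[level] - 1)) {
                break;
            }
            align = level_size[level];
        }

        printf("best alignment for %#llx is %#llx\n",
               (unsigned long long)phys, (unsigned long long)align);
        return 0;
    }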
1206 struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables; in arch_mem_domain_init()
1215 * Pick a new ASID. We use round-robin in arch_mem_domain_init()
1224 domain_ptables->base_xlat_table = in arch_mem_domain_init()
1227 if (!domain_ptables->base_xlat_table) { in arch_mem_domain_init()
1228 return -ENOMEM; in arch_mem_domain_init()
1231 domain_ptables->ttbr0 = (((uint64_t)asid) << TTBR_ASID_SHIFT) | in arch_mem_domain_init()
1232 ((uint64_t)(uintptr_t)domain_ptables->base_xlat_table); in arch_mem_domain_init()
1234 sys_slist_append(&domain_list, &domain->arch.node); in arch_mem_domain_init()
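arch_mem_domain_init() gives every memory domain its own top-level translation table and ASID, then packs both into the TTBR0 value that will be loaded on a context switch: the table base goes in the low bits and the ASID in the high bits (on AArch64 the ASID field is TTBR0_EL1[63:48]). A small sketch of that composition, assuming a 48-bit shift to match the architectural field position:

    #include <stdio.h>
    #include <stdint.h>

    #define TTBR_ASID_SHIFT 48   /* ASID occupies TTBR0_EL1[63:48] */

    int main(void)
    {
        uint64_t table_base = 0x81000000ULL;  /* hypothetical xlat table address */
        uint16_t asid = 7;                    /* per-domain address space ID     */

        /* Same shape as the ttbr0 assignment above: ASID in the high
         * bits, translation table base in the low bits. */
        uint64_t ttbr0 = ((uint64_t)asid << TTBR_ASID_SHIFT) | table_base;

        printf("ttbr0 = %#llx (asid %u, base %#llx)\n",
               (unsigned long long)ttbr0, asid,
               (unsigned long long)table_base);
        return 0;
    }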
1268 struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables; in arch_mem_domain_partition_add()
1269 struct k_mem_partition *ptn = &domain->partitions[partition_id]; in arch_mem_domain_partition_add()
1271 return private_map(domain_ptables, "partition", ptn->start, ptn->start, in arch_mem_domain_partition_add()
1272 ptn->size, ptn->attr.attrs | MT_NORMAL); in arch_mem_domain_partition_add()
1278 struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables; in arch_mem_domain_partition_remove()
1279 struct k_mem_partition *ptn = &domain->partitions[partition_id]; in arch_mem_domain_partition_remove()
1282 ptn->start, ptn->size); in arch_mem_domain_partition_remove()
1288 return private_map(ptables, "thread_stack", thread->stack_info.start, in map_thread_stack()
1289 thread->stack_info.start, thread->stack_info.size, in map_thread_stack()
1300 domain = thread->mem_domain_info.mem_domain; in arch_mem_domain_thread_add()
1301 domain_ptables = &domain->arch.ptables; in arch_mem_domain_thread_add()
1302 old_ptables = thread->arch.ptables; in arch_mem_domain_thread_add()
1304 is_user = (thread->base.user_options & K_USER) != 0; in arch_mem_domain_thread_add()
1311 thread->arch.ptables = domain_ptables; in arch_mem_domain_thread_add()
1322 ret = reset_map(old_ptables, __func__, thread->stack_info.start, in arch_mem_domain_thread_add()
1323 thread->stack_info.size); in arch_mem_domain_thread_add()
1334 domain = thread->mem_domain_info.mem_domain; in arch_mem_domain_thread_remove()
1335 domain_ptables = &domain->arch.ptables; in arch_mem_domain_thread_remove()
1337 if ((thread->base.user_options & K_USER) == 0) { in arch_mem_domain_thread_remove()
1341 if ((thread->base.thread_state & _THREAD_DEAD) == 0) { in arch_mem_domain_thread_remove()
1345 return reset_map(domain_ptables, __func__, thread->stack_info.start, in arch_mem_domain_thread_remove()
1346 thread->stack_info.size); in arch_mem_domain_thread_remove()
1351 struct arm_mmu_ptables *ptables = incoming->arch.ptables; in z_arm64_swap_ptables()
1353 uint64_t new_ttbr0 = ptables->ttbr0; in z_arm64_swap_ptables()
1371 if ((incoming->base.user_options & K_USER) == 0) { in z_arm64_thread_mem_domains_init()
1375 ptables = incoming->arch.ptables; in z_arm64_thread_mem_domains_init()
1396 uint64_t *table = ptables->base_xlat_table; in get_pte_location()
1588 if (arch_irq_unlocked(esf->spsr)) { in do_mem_page_fault()
1617 ((uintptr_t)K_MEM_VIRT_RAM_END - 1))) { in z_arm64_do_demand_paging()
1640 * 2) the page was read-only and a write occurred so we clear the in z_arm64_do_demand_paging()
1663 * returning by making the page read-write right away in z_arm64_do_demand_paging()
1681 /* make it "dirty" i.e. read-write */ in z_arm64_do_demand_paging()
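The demand-paging tail of z_arm64_do_demand_paging() distinguishes a page that was truly paged out from one that was kept read-only just to trap the first write: in the second case the handler clears the read-only permission so execution can resume, and from then on the page is considered dirty and must be written back before eviction. A standalone sketch of that bit flip; the macro names mirror the descriptor bits shown earlier in the listing, but the bit values are placeholders:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_BLOCK_DESC_AP_RO  (1ULL << 7)    /* hardware read-only bit      */
    #define PTE_SW_WRITABLE       (1ULL << 55)   /* software "really RW" marker */

    /* If the page is logically writable but currently mapped read-only,
     * make it read-write and report that it is now dirty. */
    static bool make_page_dirty(uint64_t *pte)
    {
        if ((*pte & PTE_SW_WRITABLE) && (*pte & PTE_BLOCK_DESC_AP_RO)) {
            *pte &= ~PTE_BLOCK_DESC_AP_RO;   /* clear RO: now read-write */
            return true;
        }
        return false;
    }

    int main(void)
    {
        uint64_t pte = PTE_SW_WRITABLE | PTE_BLOCK_DESC_AP_RO;

        printf("dirtied: %d, pte is now %#llx\n",
               (int)make_page_dirty(&pte), (unsigned long long)pte);
        return 0;
    }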