Lines Matching +full:mapped +full:- +full:addr
4 * SPDX-License-Identifier: Apache-2.0
17 #include <zephyr/linker/linker-defs.h>
31 * - A page frame is a page-sized physical memory region in RAM. It is a container where a data page may be placed, and is always referred to by physical address.
37 * - A data page is a page-sized region of data. It may exist in a page frame, or be paged out to a backing store, and is always looked up by virtual address.
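The per-frame metadata these definitions refer to appears throughout the listing as struct k_mem_page_frame, with a va_and_flags word and a free-list node. A minimal sketch of that shape, assuming only the field and flag names visible in the matched lines below (the authoritative definition lives in the kernel's internal MMU headers, not here):

    /* Sketch only: per-frame bookkeeping, one instance per page frame of
     * physical RAM. Names are taken from the matched lines below; the real
     * layout is defined in the kernel's internal MMU header.
     */
    struct k_mem_page_frame {
        /* Free frames are linked into free_page_frame_list through this
         * node, with K_MEM_PAGE_FRAME_FREE kept in the node's flag bits.
         */
        sys_sfnode_t node;

        /* Page-aligned virtual address this frame currently backs, with
         * status flags (K_MEM_PAGE_FRAME_MAPPED, _PINNED, _BUSY, ...)
         * packed into the low, page-offset bits.
         */
        uintptr_t va_and_flags;
    };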
86 printk("-"); in page_frame_dump()
160 * +--------------+ <- K_MEM_VIRT_RAM_START
161 * | Undefined VM | <- May contain ancillary regions like x86_64's locore
162 * +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START)
168 * +--------------+ <- K_MEM_VM_FREE_START
173 * |..............| <- mapping_pos (grows downward as more mappings are made)
175 * +--------------+
177 * +--------------+
179 * +--------------+
181 * +--------------+ <- mappings start here
182 * | Reserved | <- special purpose virtual page(s) of size K_MEM_VM_RESERVED
183 * +--------------+ <- K_MEM_VIRT_RAM_END
199 #define Z_VIRT_REGION_END_ADDR (K_MEM_VIRT_RAM_END - K_MEM_VM_RESERVED)
204 - (offset * CONFIG_MMU_PAGE_SIZE) - size; in virt_from_bitmap_offset()
210 - POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE; in virt_to_bitmap_offset()
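Taken together, the two conversions above say the allocation bitmap counts whole pages back from the end of the managed virtual region (the first operand, elided by the match, is assumed here to be K_MEM_VIRT_RAM_END). A worked example with assumed values:

    /* Assumed: K_MEM_VIRT_RAM_END = 0x90000000, CONFIG_MMU_PAGE_SIZE = 0x1000.
     * virt_from_bitmap_offset(offset = 3, size = 0x2000):
     *   0x90000000 - (3 * 0x1000) - 0x2000 = 0x8FFFB000
     * virt_to_bitmap_offset(vaddr = 0x8FFFB000, size = 0x2000):
     *   (0x90000000 - 0x8FFFB000 - 0x2000) / 0x1000 = 3
     * Bit 0 therefore corresponds to the highest page of the region, and
     * higher bit offsets walk toward lower virtual addresses.
     */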
231 - POINTER_TO_UINT(K_MEM_VIRT_RAM_START); in virt_region_init()
256 && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR), in virt_region_free()
259 && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) { in virt_region_free()
274 (((vaddr_u8 + size - 1) >= Z_VIRT_REGION_START_ADDR) && in virt_region_free()
275 ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) { in virt_region_free()
279 size_t adjusted_sz = adjusted_end - adjusted_start; in virt_region_free()
301 num_bits = (size + align - CONFIG_MMU_PAGE_SIZE) / CONFIG_MMU_PAGE_SIZE; in virt_region_alloc()
322 * +--------------+ <- K_MEM_VIRT_RAM_START in virt_region_alloc()
324 * +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START) in virt_region_alloc()
330 * +--------------+ <- K_MEM_VM_FREE_START in virt_region_alloc()
332 * +==============+ <- dest_addr in virt_region_alloc()
334 * |..............| <- aligned_dest_addr in virt_region_alloc()
339 * |..............| <- aligned_dest_addr + size in virt_region_alloc()
341 * +==============+ <- offset from K_MEM_VIRT_RAM_END == dest_addr + alloc_size in virt_region_alloc()
343 * +--------------+ in virt_region_alloc()
345 * +--------------+ in virt_region_alloc()
347 * +--------------+ <- K_MEM_VIRT_RAM_END in virt_region_alloc()
352 aligned_dest_addr - dest_addr); in virt_region_alloc()
353 if (((dest_addr + alloc_size) - (aligned_dest_addr + size)) > 0) { in virt_region_alloc()
355 (dest_addr + alloc_size) - (aligned_dest_addr + size)); in virt_region_alloc()
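The over-allocate-and-trim approach in the diagram above can be made concrete with assumed numbers: a 16 KiB mapping requested with 64 KiB alignment on 4 KiB pages.

    /* Assumed: size = 0x4000, align = 0x10000, CONFIG_MMU_PAGE_SIZE = 0x1000.
     *   num_bits   = (0x4000 + 0x10000 - 0x1000) / 0x1000 = 19 pages
     *   alloc_size = 19 * 0x1000 = 0x13000
     * Once dest_addr is reserved, aligned_dest_addr = ROUND_UP(dest_addr, 0x10000)
     * is guaranteed to fit inside it, and the unused head
     * [dest_addr, aligned_dest_addr) plus tail
     * [aligned_dest_addr + size, dest_addr + alloc_size) are released back to
     * the bitmap, leaving only the aligned 16 KiB region in use.
     */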
379 * However, there are use-cases to consolidate free pages such that entire
406 z_free_page_count--; in free_page_frame_list_get()
410 pf->va_and_flags = 0; in free_page_frame_list_get()
422 sys_sfnode_init(&pf->node, K_MEM_PAGE_FRAME_FREE); in free_page_frame_list_put()
423 sys_sflist_append(&free_page_frame_list, &pf->node); in free_page_frame_list_put()
434 pf->va_and_flags = 0; in page_frame_free_locked()
442 /* Called after the frame is mapped in the arch layer, to update our
445 static void frame_mapped_set(struct k_mem_page_frame *pf, void *addr) in frame_mapped_set() argument
454 * This is uncommon; use-cases are for things like the in frame_mapped_set()
458 "non-pinned and already mapped to %p", in frame_mapped_set()
461 uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1; in frame_mapped_set()
462 uintptr_t va = (uintptr_t)addr & ~flags_mask; in frame_mapped_set()
464 pf->va_and_flags &= flags_mask; in frame_mapped_set()
465 pf->va_and_flags |= va | K_MEM_PAGE_FRAME_MAPPED; in frame_mapped_set()
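The masking above packs the page-aligned virtual address and the frame's status flags into one word; concretely, assuming 4 KiB pages:

    /* With CONFIG_MMU_PAGE_SIZE = 0x1000, flags_mask = 0x0FFF, so for
     * addr = 0x80402ABC:
     *   va           = 0x80402ABC & ~0x0FFF = 0x80402000
     *   va_and_flags = (old & 0x0FFF) | va | K_MEM_PAGE_FRAME_MAPPED
     * i.e. the low 12 bits keep the flag bits and the upper bits record
     * which virtual page this frame currently backs.
     */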
469 /* Go through page frames to find the physical address mapped
473 * @param[out] phys Physical address mapped to the input virtual address
477 * @retval -EFAULT if virtual address is not mapped
483 int ret = -EFAULT; in virt_to_page_frame()
513 * TODO: Add optional support for copy-on-write mappings to a zero page instead
516 * page-ins as memory is mapped and physical RAM or backing store storage will
517 * not be used if the mapped memory is unused. The cost is an empty physical
520 static int map_anon_page(void *addr, uint32_t flags) in map_anon_page() argument
540 return -ENOMEM; in map_anon_page()
545 pf->va_and_flags = 0; in map_anon_page()
547 return -ENOMEM; in map_anon_page()
552 arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags); in map_anon_page()
557 frame_mapped_set(pf, addr); in map_anon_page()
564 LOG_DBG("memory mapping anon page %p -> 0x%lx", addr, phys); in map_anon_page()
635 LOG_DBG("memory mapping anon pages %p to %p unpaged", dst, pos-1); in k_mem_map_phys_guard()
646 * call k_mem_unmap(dst, pos - dst) in k_mem_map_phys_guard()
669 /* If we later implement mappings to a copy-on-write in k_mem_map_phys_guard()
678 void k_mem_unmap_phys_guard(void *addr, size_t size, bool is_anon) in k_mem_unmap_phys_guard() argument
688 __ASSERT_NO_MSG(POINTER_TO_UINT(addr) >= CONFIG_MMU_PAGE_SIZE); in k_mem_unmap_phys_guard()
693 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
699 * Bail if not, as this is probably a region not mapped in k_mem_unmap_phys_guard()
702 pos = addr; in k_mem_unmap_phys_guard()
703 ret = arch_page_phys_get(pos - CONFIG_MMU_PAGE_SIZE, NULL); in k_mem_unmap_phys_guard()
707 __func__, addr, size); in k_mem_unmap_phys_guard()
715 __func__, addr, size); in k_mem_unmap_phys_guard()
721 VIRT_FOREACH(addr, size, pos) { in k_mem_unmap_phys_guard()
759 /* Found an address not mapped. Do not continue. */ in k_mem_unmap_phys_guard()
777 "%s: 0x%lx is not a mapped page frame", __func__, phys); in k_mem_unmap_phys_guard()
779 /* Page frame is not marked mapped. in k_mem_unmap_phys_guard()
798 * Unmapping previously mapped memory with a specific physical address. in k_mem_unmap_phys_guard()
801 * have been unmapped. We just need to unmap the in-between in k_mem_unmap_phys_guard()
802 * region [addr, (addr + size)). in k_mem_unmap_phys_guard()
804 arch_mem_unmap(addr, size); in k_mem_unmap_phys_guard()
807 /* There are guard pages just before and after the mapped in k_mem_unmap_phys_guard()
810 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
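The probing of addr - CONFIG_MMU_PAGE_SIZE above reflects the guard pages placed around anonymous mappings. A sketch of the layout (descriptive labels only, not code from the file):

    /* Layout around an anonymous mapping of `size` bytes:
     *
     *   addr - PAGE_SIZE     guard page, intentionally left unmapped
     *   [addr, addr + size)  usable pages returned to the caller
     *   addr + size          guard page, intentionally left unmapped
     *
     * so each request consumes size + 2 * CONFIG_MMU_PAGE_SIZE of virtual
     * address space, and unmapping must release the guards along with the
     * payload pages.
     */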
818 int k_mem_update_flags(void *addr, size_t size, uint32_t flags) in k_mem_update_flags() argument
824 k_mem_assert_virtual_region(addr, size); in k_mem_update_flags()
833 ret = arch_page_phys_get(addr, &phys); in k_mem_update_flags()
838 /* TODO: detect and handle paged-out memory as well */ in k_mem_update_flags()
840 arch_mem_unmap(addr, size); in k_mem_update_flags()
841 arch_mem_map(addr, phys, size, flags); in k_mem_update_flags()
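A small usage sketch for the unmap/remap flag update above, assuming the usual k_mem_map() flow and the K_MEM_PERM_*/K_MEM_CACHE_* flag names; fill_buffer() is a hypothetical helper:

    /* Sketch: map a buffer read-write, populate it, then drop the RW
     * permission so later stray writes fault instead of corrupting it.
     * Assumption: omitting K_MEM_PERM_RW requests a read-only mapping.
     */
    uint8_t *buf = k_mem_map(4 * CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

    if (buf != NULL) {
        fill_buffer(buf);                               /* hypothetical */
        if (k_mem_update_flags(buf, 4 * CONFIG_MMU_PAGE_SIZE,
                               K_MEM_CACHE_WB) != 0) {
            printk("flag update failed\n");
        }
    }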
858 ret = z_free_page_count - CONFIG_DEMAND_PAGING_PAGE_FRAMES_RESERVE; in k_mem_free_get()
872 * @param[in] phys Physical address of region to be mapped, aligned to MMU_PAGE_SIZE
873 * @param[in] size Size of region to be mapped, aligned to MMU_PAGE_SIZE
901 __ASSERT(!(flags & K_MEM_DIRECT_MAP), "The direct-map is not enabled"); in k_mem_map_phys_bare()
906 __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_phys); in k_mem_map_phys_bare()
907 __ASSERT(aligned_phys < (aligned_phys + (aligned_size - 1)), in k_mem_map_phys_bare()
928 (uintptr_t)(K_MEM_VIRT_RAM_END - 1)) || in k_mem_map_phys_bare()
929 IN_RANGE(aligned_phys + aligned_size - 1, in k_mem_map_phys_bare()
931 (uintptr_t)(K_MEM_VIRT_RAM_END - 1))) { in k_mem_map_phys_bare()
935 size_t adjusted_sz = adjusted_end - adjusted_start; in k_mem_map_phys_bare()
954 ((uintptr_t)dest_addr + (size - 1)), in k_mem_map_phys_bare()
967 /* May re-visit this in the future, but for now running out of in k_mem_map_phys_bare()
988 __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_virt); in k_mem_unmap_phys_bare()
989 __ASSERT(aligned_virt < (aligned_virt + (aligned_size - 1)), in k_mem_unmap_phys_bare()
1008 uintptr_t addr, size_t size, size_t align) in k_mem_region_align() argument
1012 /* The actual mapped region must be page-aligned. Round down the in k_mem_region_align()
1015 *aligned_addr = ROUND_DOWN(addr, align); in k_mem_region_align()
1016 addr_offset = addr - *aligned_addr; in k_mem_region_align()
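Concretely, for the round-down and offset computed above (values assumed):

    /* Assumed: addr = 0x40001234, size = 0x100, align = 0x1000.
     *   *aligned_addr = ROUND_DOWN(0x40001234, 0x1000) = 0x40001000
     *   addr_offset   = 0x40001234 - 0x40001000        = 0x234
     * The aligned size then has to cover addr_offset + size bytes, i.e. the
     * whole page [0x40001000, 0x40002000), so callers map full pages and add
     * addr_offset back to recover the originally requested address.
     */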
1027 uint8_t *addr; in mark_linker_section_pinned() local
1033 size_t pinned_size = pinned_end - pinned_start; in mark_linker_section_pinned()
1035 VIRT_FOREACH(UINT_TO_POINTER(pinned_start), pinned_size, addr) in mark_linker_section_pinned()
1037 pf = k_mem_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr)); in mark_linker_section_pinned()
1038 frame_mapped_set(pf, addr); in mark_linker_section_pinned()
1058 uint8_t *addr; in z_paging_ondemand_section_map() local
1065 VIRT_FOREACH(lnkr_ondemand_text_start, size, addr) { in z_paging_ondemand_section_map()
1066 k_mem_paging_backing_store_location_query(addr, &location); in z_paging_ondemand_section_map()
1067 arch_mem_map(addr, location, CONFIG_MMU_PAGE_SIZE, flags); in z_paging_ondemand_section_map()
1069 virt_to_bitmap_offset(addr, CONFIG_MMU_PAGE_SIZE)); in z_paging_ondemand_section_map()
1074 VIRT_FOREACH(lnkr_ondemand_rodata_start, size, addr) { in z_paging_ondemand_section_map()
1075 k_mem_paging_backing_store_location_query(addr, &location); in z_paging_ondemand_section_map()
1076 arch_mem_map(addr, location, CONFIG_MMU_PAGE_SIZE, flags); in z_paging_ondemand_section_map()
1078 virt_to_bitmap_offset(addr, CONFIG_MMU_PAGE_SIZE)); in z_paging_ondemand_section_map()
1086 uint8_t *addr; in z_mem_manage_init() local
1092 ARG_UNUSED(addr); in z_mem_manage_init()
1102 /* All pages composing the Zephyr image are mapped at boot in a in z_mem_manage_init()
1105 VIRT_FOREACH(K_MEM_KERNEL_VIRT_START, K_MEM_KERNEL_VIRT_SIZE, addr) in z_mem_manage_init()
1107 pf = k_mem_phys_to_page_frame(K_MEM_BOOT_VIRT_TO_PHYS(addr)); in z_mem_manage_init()
1108 frame_mapped_set(pf, addr); in z_mem_manage_init()
1111 * currently tested with anonymously-mapped pages which are not in z_mem_manage_init()
1118 * handling, page-ins, etc. in z_mem_manage_init()
1126 * boot process. Will be un-pinned once boot process completes. in z_mem_manage_init()
1136 /* Any remaining pages that aren't mapped, reserved, or pinned get in z_mem_manage_init()
1239 time_diff = k_cycle_get_32() - time_start; in do_backing_store_page_in()
1270 time_diff = k_cycle_get_32() - time_start; in do_backing_store_page_out()
1291 static void virt_region_foreach(void *addr, size_t size, in virt_region_foreach() argument
1294 k_mem_assert_virtual_region(addr, size); in virt_region_foreach()
1297 func((uint8_t *)addr + offset); in virt_region_foreach()
1307 * - Map page frame to scratch area if requested. This is always true if we're
1310 * - If mapped:
1311 * - obtain backing store location and populate location parameter
1312 * - Update page tables with location
1313 * - Mark page frame as busy
1315 * Returns -ENOMEM if the backing store is full
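The steps above can be read as straight-line pseudocode. The following is an illustrative outline only, not the actual implementation, using paging interfaces this file already relies on:

    /* Outline only: mirrors the bullet list above, heavily simplified. */
    static int prepare_frame_outline(struct k_mem_page_frame *pf, bool dirty,
                                     bool page_fault, uintptr_t *location)
    {
        if (dirty || page_fault) {
            /* Map the frame at the scratch address so its contents remain
             * reachable after the data page itself is unmapped.
             */
            arch_mem_scratch(k_mem_page_frame_to_phys(pf));
        }

        if (k_mem_page_frame_is_mapped(pf)) {
            /* Reserve a backing store slot for the outgoing data page */
            if (k_mem_paging_backing_store_location_get(pf, location,
                                                        page_fault) != 0) {
                return -ENOMEM;        /* backing store full */
            }

            /* Point the page tables at the store location and mark the
             * frame busy until the copy-out completes.
             */
            arch_mem_page_out(k_mem_page_frame_to_virt(pf), *location);
            pf->va_and_flags |= K_MEM_PAGE_FRAME_BUSY;
        }

        return 0;
    }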
1332 * wasn't pre-populated with this data page. in page_frame_prepare_locked()
1352 return -ENOMEM; in page_frame_prepare_locked()
1360 /* Shouldn't happen unless this function is mis-used */ in page_frame_prepare_locked()
1361 __ASSERT(!dirty, "un-mapped page determined to be dirty"); in page_frame_prepare_locked()
1377 static int do_mem_evict(void *addr) in do_mem_evict() argument
1397 flags = arch_page_info_get(addr, &phys, false); in do_mem_evict()
1399 "address %p isn't mapped", addr); in do_mem_evict()
1401 /* Un-mapped or already evicted. Nothing to do */ in do_mem_evict()
1408 __ASSERT(k_mem_page_frame_to_virt(pf) == addr, "page frame address mismatch"); in do_mem_evict()
1437 int k_mem_page_out(void *addr, size_t size) in k_mem_page_out() argument
1440 addr); in k_mem_page_out()
1441 k_mem_assert_virtual_region(addr, size); in k_mem_page_out()
1444 void *pos = (uint8_t *)addr + offset; in k_mem_page_out()
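k_mem_page_out() is the public entry point for proactively evicting a range. A usage sketch, where big_buffer/big_buffer_size stand for a page-aligned, demand-paged buffer that will not be touched for a while (hypothetical names):

    /* Sketch: push a cold buffer out to the backing store so its page
     * frames become available to the allocator right away.
     */
    int ret = k_mem_page_out(big_buffer, big_buffer_size);

    if (ret != 0) {
        /* -ENOMEM: backing store full; the pages simply stay resident */
        printk("could not page out buffer: %d\n", ret);
    }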
1469 * data page to page-in; see comments in that function. in k_mem_page_frame_evict()
1535 faulting_thread->paging_stats.pagefaults.cnt++; in paging_stats_faults_inc()
1538 faulting_thread->paging_stats.pagefaults.irq_unlocked++; in paging_stats_faults_inc()
1540 faulting_thread->paging_stats.pagefaults.irq_locked++; in paging_stats_faults_inc()
1551 faulting_thread->paging_stats.pagefaults.in_isr++; in paging_stats_faults_inc()
1569 faulting_thread->paging_stats.eviction.dirty++; in paging_stats_eviction_inc()
1571 faulting_thread->paging_stats.eviction.clean++; in paging_stats_eviction_inc()
1604 time_diff = k_cycle_get_32() - time_start; in do_eviction_select()
1613 static bool do_page_fault(void *addr, bool pin) in do_page_fault() argument
1625 addr); in do_page_fault()
1627 LOG_DBG("page fault at %p", addr); in do_page_fault()
1631 * - k_mem_paging_eviction_select() metrics in do_page_fault()
1637 * We do re-enable interrupts during the page-in/page-out operation in do_page_fault()
1651 * scheduled during the page-in/out operation. Support for in do_page_fault()
1665 * and k_sched_lock() is equivalent to a no-op on SMP anyway. in do_page_fault()
1679 status = arch_page_location_get(addr, &page_in_location); in do_page_fault()
1701 /* This if-block is to pin the page if it is in do_page_fault()
1744 frame_mapped_set(pf, addr); in do_page_fault()
1749 arch_mem_page_in(addr, k_mem_page_frame_to_phys(pf)); in do_page_fault()
1767 static void do_page_in(void *addr) in do_page_in() argument
1771 ret = do_page_fault(addr, false); in do_page_in()
1772 __ASSERT(ret, "unmapped memory address %p", addr); in do_page_in()
1776 void k_mem_page_in(void *addr, size_t size) in k_mem_page_in() argument
1781 virt_region_foreach(addr, size, do_page_in); in k_mem_page_in()
1784 static void do_mem_pin(void *addr) in do_mem_pin() argument
1788 ret = do_page_fault(addr, true); in do_mem_pin()
1789 __ASSERT(ret, "unmapped memory address %p", addr); in do_mem_pin()
1793 void k_mem_pin(void *addr, size_t size) in k_mem_pin() argument
1798 virt_region_foreach(addr, size, do_mem_pin); in k_mem_pin()
1801 bool k_mem_page_fault(void *addr) in k_mem_page_fault() argument
1803 return do_page_fault(addr, false); in k_mem_page_fault()
1806 static void do_mem_unpin(void *addr) in do_mem_unpin() argument
1813 flags = arch_page_info_get(addr, &phys, false); in do_mem_unpin()
1815 "invalid data page at %p", addr); in do_mem_unpin()
1829 void k_mem_unpin(void *addr, size_t size) in k_mem_unpin() argument
1832 addr); in k_mem_unpin()
1833 virt_region_foreach(addr, size, do_mem_unpin); in k_mem_unpin()
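Finally, a short sketch of the pin/unpin pair above, assuming buf/len describe demand-paged memory about to be used by code that cannot tolerate page faults (for example an ISR or a DMA transfer); do_fault_intolerant_work() is a hypothetical helper:

    /* Sketch: fault in and lock every data page in [buf, buf + len) for
     * the duration of the critical operation, then release the pin.
     */
    k_mem_pin(buf, len);
    do_fault_intolerant_work(buf, len);
    k_mem_unpin(buf, len);              /* pages are evictable again */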