Lines Matching +full:active +full:- +full:range +full:- +full:x +full:- +full:max
4 * SPDX-License-Identifier: Apache-2.0
17 #include <zephyr/linker/linker-defs.h>
31 * - A page frame is a page-sized physical memory region in RAM. It is a
37 * - A data page is a page-sized region of data. It may exist in a page frame,
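The two definitions above drive the rest of the file: every page-sized chunk of physical RAM gets a small bookkeeping structure, and data pages move between those frames and the backing store. A minimal sketch of that structure, inferred only from the fields referenced in the hits below (node, va_and_flags); the real definition lives in the kernel's internal MMU header and carries additional config-dependent fields:

	#include <zephyr/sys/sflist.h>

	struct k_mem_page_frame {
		/* Linkage in free_page_frame_list while the frame is free */
		sys_sfnode_t node;

		/* Page-aligned virtual address the frame is mapped to, OR'd
		 * with K_MEM_PAGE_FRAME_* flag bits (MAPPED, FREE, pinned,
		 * busy, ...) kept in the low, page-offset bits
		 */
		uintptr_t va_and_flags;
	};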
76 #define COLOR(x) printk(_CONCAT(ANSI_, x)) argument
78 #define COLOR(x) do { } while (false) argument
86 printk("-"); in page_frame_dump()
113 printk("Physical memory from 0x%lx to 0x%lx\n", in k_mem_page_frames_dump()
160 * +--------------+ <- K_MEM_VIRT_RAM_START
161 * | Undefined VM | <- May contain ancillary regions like x86_64's locore
162 * +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START)
168 * +--------------+ <- K_MEM_VM_FREE_START
173 * |..............| <- mapping_pos (grows downward as more mappings are made)
175 * +--------------+
177 * +--------------+
179 * +--------------+
181 * +--------------+ <- mappings start here
182 * | Reserved | <- special purpose virtual page(s) of size K_MEM_VM_RESERVED
183 * +--------------+ <- K_MEM_VIRT_RAM_END
199 #define Z_VIRT_REGION_END_ADDR (K_MEM_VIRT_RAM_END - K_MEM_VM_RESERVED)
204 - (offset * CONFIG_MMU_PAGE_SIZE) - size; in virt_from_bitmap_offset()
210 - POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE; in virt_to_bitmap_offset()
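These two conversions are the heart of the virtual-region allocator: free virtual pages are tracked in a bitmap that grows downward from the top of the region, so bit 0 corresponds to the highest allocatable page. A sketch of both helpers, reconstructed from the arithmetic shown above; using Z_VIRT_REGION_END_ADDR as the downward-growing base is an assumption (the real static helpers may anchor on K_MEM_VIRT_RAM_END instead):

	static void *virt_from_bitmap_offset(size_t offset, size_t size)
	{
		return UINT_TO_POINTER(POINTER_TO_UINT(Z_VIRT_REGION_END_ADDR)
				       - (offset * CONFIG_MMU_PAGE_SIZE) - size);
	}

	static size_t virt_to_bitmap_offset(void *vaddr, size_t size)
	{
		return (POINTER_TO_UINT(Z_VIRT_REGION_END_ADDR)
			- POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE;
	}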
231 - POINTER_TO_UINT(K_MEM_VIRT_RAM_START); in virt_region_init()
256 && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR), in virt_region_free()
259 && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) { in virt_region_free()
274 (((vaddr_u8 + size - 1) >= Z_VIRT_REGION_START_ADDR) && in virt_region_free()
275 ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) { in virt_region_free()
276 uint8_t *adjusted_start = MAX(vaddr_u8, Z_VIRT_REGION_START_ADDR); in virt_region_free()
279 size_t adjusted_sz = adjusted_end - adjusted_start; in virt_region_free()
301 num_bits = (size + align - CONFIG_MMU_PAGE_SIZE) / CONFIG_MMU_PAGE_SIZE; in virt_region_alloc()
322 * +--------------+ <- K_MEM_VIRT_RAM_START in virt_region_alloc()
324 * +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START) in virt_region_alloc()
330 * +--------------+ <- K_MEM_VM_FREE_START in virt_region_alloc()
332 * +==============+ <- dest_addr in virt_region_alloc()
334 * |..............| <- aligned_dest_addr in virt_region_alloc()
339 * |..............| <- aligned_dest_addr + size in virt_region_alloc()
341 * +==============+ <- offset from K_MEM_VIRT_RAM_END == dest_addr + alloc_size in virt_region_alloc()
343 * +--------------+ in virt_region_alloc()
345 * +--------------+ in virt_region_alloc()
347 * +--------------+ <- K_MEM_VIRT_RAM_END in virt_region_alloc()
352 aligned_dest_addr - dest_addr); in virt_region_alloc()
353 if (((dest_addr + alloc_size) - (aligned_dest_addr + size)) > 0) { in virt_region_alloc()
355 (dest_addr + alloc_size) - (aligned_dest_addr + size)); in virt_region_alloc()
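In other words, for align > CONFIG_MMU_PAGE_SIZE the allocator over-reserves by align - PAGE_SIZE bytes, rounds the start up to the requested alignment, and hands the unused head and tail pages straight back. A condensed sketch of that flow; reserve_pages()/release_pages() are hypothetical stand-ins for the bitmap reservation and the virt_region_free() calls shown above:

	#include <zephyr/sys/util.h>

	/* hypothetical stand-ins for the bitmap reservation / virt_region_free() */
	extern uint8_t *reserve_pages(size_t size);
	extern void release_pages(uint8_t *addr, size_t size);

	static uint8_t *alloc_aligned(size_t size, size_t align)
	{
		/* over-reserve so an aligned sub-range of 'size' bytes must exist */
		size_t alloc_size = size + align - CONFIG_MMU_PAGE_SIZE;
		uint8_t *dest_addr = reserve_pages(alloc_size);
		uint8_t *aligned = UINT_TO_POINTER(
			ROUND_UP(POINTER_TO_UINT(dest_addr), align));

		if (aligned != dest_addr) {
			/* give back the unused pages before the aligned block */
			release_pages(dest_addr, aligned - dest_addr);
		}
		if ((dest_addr + alloc_size) > (aligned + size)) {
			/* ...and the unused pages after it */
			release_pages(aligned + size,
				      (dest_addr + alloc_size) - (aligned + size));
		}
		return aligned;
	}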
379 * However, there are use-cases to consolidate free pages such that entire
382 * which are still active.
395 __ASSERT(expr, "page frame 0x%lx: " fmt, k_mem_page_frame_to_phys(pf), \
406 z_free_page_count--; in free_page_frame_list_get()
410 pf->va_and_flags = 0; in free_page_frame_list_get()
422 sys_sfnode_init(&pf->node, K_MEM_PAGE_FRAME_FREE); in free_page_frame_list_put()
423 sys_sflist_append(&free_page_frame_list, &pf->node); in free_page_frame_list_put()
434 pf->va_and_flags = 0; in page_frame_free_locked()
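Free page frames sit on a flagged singly-linked list: getting a frame pops the head and clears its bookkeeping, putting one back tags the node with K_MEM_PAGE_FRAME_FREE. A reduced sketch of the pair, omitting the asserts and reserved-frame accounting present in the real functions:

	static sys_sflist_t free_page_frame_list;
	static size_t z_free_page_count;

	static struct k_mem_page_frame *free_page_frame_list_get(void)
	{
		sys_sfnode_t *node = sys_sflist_get(&free_page_frame_list);

		if (node == NULL) {
			return NULL;
		}

		z_free_page_count--;
		struct k_mem_page_frame *pf =
			CONTAINER_OF(node, struct k_mem_page_frame, node);
		pf->va_and_flags = 0;

		return pf;
	}

	static void free_page_frame_list_put(struct k_mem_page_frame *pf)
	{
		sys_sfnode_init(&pf->node, K_MEM_PAGE_FRAME_FREE);
		sys_sflist_append(&free_page_frame_list, &pf->node);
		z_free_page_count++;
	}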
454 * This is uncommon; use-cases are for things like the in frame_mapped_set()
458 "non-pinned and already mapped to %p", in frame_mapped_set()
461 uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1; in frame_mapped_set()
464 pf->va_and_flags &= flags_mask; in frame_mapped_set()
465 pf->va_and_flags |= va | K_MEM_PAGE_FRAME_MAPPED; in frame_mapped_set()
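Because frame mappings are always page aligned, the low page-offset bits of va_and_flags are free to hold flag bits; frame_mapped_set() preserves those flags and stores the new virtual address in the upper bits. Recovering the address is just masking the flags back out. The helper name below is illustrative, not the kernel's:

	/* low bits hold K_MEM_PAGE_FRAME_* flags, upper bits the mapped VA */
	#define PF_FLAGS_MASK ((uintptr_t)(CONFIG_MMU_PAGE_SIZE - 1))

	static inline void *pf_mapped_addr(const struct k_mem_page_frame *pf)
	{
		return UINT_TO_POINTER(pf->va_and_flags & ~PF_FLAGS_MASK);
	}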
477 * @retval -EFAULT if virtual address is not mapped
483 int ret = -EFAULT; in virt_to_page_frame()
513 * TODO: Add optional support for copy-on-write mappings to a zero page instead
516 * page-ins as memory is mapped and physical RAM or backing store space will
535 LOG_DBG("evicting %p at 0x%lx", in map_anon_page()
540 return -ENOMEM; in map_anon_page()
545 pf->va_and_flags = 0; in map_anon_page()
547 return -ENOMEM; in map_anon_page()
564 LOG_DBG("memory mapping anon page %p -> 0x%lx", addr, phys); in map_anon_page()
635 LOG_DBG("memory mapping anon pages %p to %p unpaged", dst, pos-1); in k_mem_map_phys_guard()
646 * call k_mem_unmap(dst, pos - dst) in k_mem_map_phys_guard()
669 /* If we later implement mappings to a copy-on-write in k_mem_map_phys_guard()
690 /* Make sure address range is still valid after accounting in k_mem_unmap_phys_guard()
693 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
703 ret = arch_page_phys_get(pos - CONFIG_MMU_PAGE_SIZE, NULL); in k_mem_unmap_phys_guard()
764 "%s: 0x%lx is not a page frame", __func__, phys); in k_mem_unmap_phys_guard()
777 "%s: 0x%lx is not a mapped page frame", __func__, phys); in k_mem_unmap_phys_guard()
801 * have been unmapped. We just need to unmap the in-between in k_mem_unmap_phys_guard()
810 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
838 /* TODO: detect and handle paged-out memory as well */ in k_mem_update_flags()
858 ret = z_free_page_count - CONFIG_DEMAND_PAGING_PAGE_FRAMES_RESERVE; in k_mem_free_get()
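k_mem_map_phys_guard()/k_mem_unmap_phys_guard() above implement the public k_mem_map()/k_mem_unmap() pair, which allocate anonymous page frames plus a virtual region with guard pages on either side, while k_mem_free_get() reports how much RAM remains for such mappings after the demand-paging reserve. A minimal usage sketch, assuming an MMU-enabled target and with error handling trimmed:

	#include <zephyr/kernel.h>
	#include <zephyr/kernel/mm.h>

	void anon_mapping_example(void)
	{
		size_t size = 4 * CONFIG_MMU_PAGE_SIZE;

		/* headroom left after the demand-paging page frame reserve */
		if (k_mem_free_get() < size) {
			return;
		}

		/* anonymous read/write mapping backed by free page frames */
		uint8_t *buf = k_mem_map(size, K_MEM_PERM_RW);

		if (buf == NULL) {
			return; /* out of virtual space or page frames */
		}

		buf[0] = 0xaa;

		/* size must match the original mapping exactly */
		k_mem_unmap(buf, size);
	}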
901 __ASSERT(!(flags & K_MEM_DIRECT_MAP), "The direct-map is not enabled"); in k_mem_map_phys_bare()
906 __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_phys); in k_mem_map_phys_bare()
907 __ASSERT(aligned_phys < (aligned_phys + (aligned_size - 1)), in k_mem_map_phys_bare()
908 "wraparound for physical address 0x%lx (size %zu)", in k_mem_map_phys_bare()
928 (uintptr_t)(K_MEM_VIRT_RAM_END - 1)) || in k_mem_map_phys_bare()
929 IN_RANGE(aligned_phys + aligned_size - 1, in k_mem_map_phys_bare()
931 (uintptr_t)(K_MEM_VIRT_RAM_END - 1))) { in k_mem_map_phys_bare()
932 uint8_t *adjusted_start = MAX(dest_addr, K_MEM_VIRT_RAM_START); in k_mem_map_phys_bare()
935 size_t adjusted_sz = adjusted_end - adjusted_start; in k_mem_map_phys_bare()
954 ((uintptr_t)dest_addr + (size - 1)), in k_mem_map_phys_bare()
958 LOG_DBG("arch_mem_map(%p, 0x%lx, %zu, %x) offset %lu", dest_addr, in k_mem_map_phys_bare()
967 /* May re-visit this in the future, but for now running out of in k_mem_map_phys_bare()
974 LOG_ERR("memory mapping 0x%lx (size %zu, flags 0x%x) failed", in k_mem_map_phys_bare()
988 __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_virt); in k_mem_unmap_phys_bare()
989 __ASSERT(aligned_virt < (aligned_virt + (aligned_size - 1)), in k_mem_unmap_phys_bare()
990 "wraparound for virtual address 0x%lx (size %zu)", in k_mem_unmap_phys_bare()
995 LOG_DBG("arch_mem_unmap(0x%lx, %zu) offset %lu", in k_mem_unmap_phys_bare()
1012 /* The actual mapped region must be page-aligned. Round down the in k_mem_region_align()
1016 addr_offset = addr - *aligned_addr; in k_mem_region_align()
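k_mem_region_align() does the rounding described here: the address is rounded down to a page boundary, the size rounded up so the whole requested range is covered, and the returned offset lets the caller find the original address inside the aligned region. A sketch of typical use for an unaligned physical range, assuming the k_mem_region_align() signature from the kernel MM headers:

	void align_example(uintptr_t phys_addr, size_t size)
	{
		uintptr_t aligned_phys;
		size_t aligned_size;

		/* round phys_addr down and size up to whole pages; 'offset'
		 * is how far the original address sits inside that region
		 */
		size_t offset = k_mem_region_align(&aligned_phys, &aligned_size,
						   phys_addr, size,
						   CONFIG_MMU_PAGE_SIZE);

		/* map aligned_phys/aligned_size (e.g. via the phys_bare path
		 * above), then add 'offset' to the returned virtual address
		 * to get back to phys_addr's mapping
		 */
		ARG_UNUSED(offset);
	}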
1033 size_t pinned_size = pinned_end - pinned_start; in mark_linker_section_pinned()
1111 * currently tested with anonymously-mapped pages which are not in z_mem_manage_init()
1118 * handling, page-ins, etc. in z_mem_manage_init()
1126 * boot process. Will be un-pinned once the boot process completes. in z_mem_manage_init()
1239 time_diff = k_cycle_get_32() - time_start; in do_backing_store_page_in()
1270 time_diff = k_cycle_get_32() - time_start; in do_backing_store_page_out()
1307 * - Map page frame to scratch area if requested. This always is true if we're
1310 * - If mapped:
1311 * - obtain backing store location and populate location parameter
1312 * - Update page tables with location
1313 * - Mark page frame as busy
1315 * Returns -ENOMEM if the backing store is full
1325 __ASSERT(!k_mem_page_frame_is_pinned(pf), "page frame 0x%lx is pinned", in page_frame_prepare_locked()
1332 * wasn't pre-populated with this data page. in page_frame_prepare_locked()
1352 return -ENOMEM; in page_frame_prepare_locked()
1360 /* Shouldn't happen unless this function is mis-used */ in page_frame_prepare_locked()
1361 __ASSERT(!dirty, "un-mapped page determined to be dirty"); in page_frame_prepare_locked()
1365 __ASSERT(!k_mem_page_frame_is_busy(pf), "page frame 0x%lx is already busy", in page_frame_prepare_locked()
1401 /* Un-mapped or already evicted. Nothing to do */ in do_mem_evict()
1465 __ASSERT(page_frames_initialized, "%s called on 0x%lx too early", in k_mem_page_frame_evict()
1469 * data page to page-in, see comments in that function. in k_mem_page_frame_evict()
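The eviction path above backs the public demand-paging calls: k_mem_page_out() forcibly evicts a range to the backing store, k_mem_page_in() faults it back in, and k_mem_pin()/k_mem_unpin() keep it resident. A minimal sketch, assuming CONFIG_DEMAND_PAGING and a configured backing store:

	#include <zephyr/kernel/mm/demand_paging.h>

	void paging_example(void *buf, size_t size)
	{
		/* push the range out to the backing store to free page
		 * frames; fails with -ENOMEM if the backing store is full
		 */
		if (k_mem_page_out(buf, size) != 0) {
			return;
		}

		/* bring it back (also happens automatically on first access) */
		k_mem_page_in(buf, size);

		/* keep it resident across a latency-critical section */
		k_mem_pin(buf, size);
		/* ... */
		k_mem_unpin(buf, size);
	}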
1535 faulting_thread->paging_stats.pagefaults.cnt++; in paging_stats_faults_inc()
1538 faulting_thread->paging_stats.pagefaults.irq_unlocked++; in paging_stats_faults_inc()
1540 faulting_thread->paging_stats.pagefaults.irq_locked++; in paging_stats_faults_inc()
1551 faulting_thread->paging_stats.pagefaults.in_isr++; in paging_stats_faults_inc()
1569 faulting_thread->paging_stats.eviction.dirty++; in paging_stats_eviction_inc()
1571 faulting_thread->paging_stats.eviction.clean++; in paging_stats_eviction_inc()
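The counters incremented here are exposed through the paging statistics API when CONFIG_DEMAND_PAGING_STATS is enabled. A small sketch of reading the global counts, assuming the k_mem_paging_stats_get() accessor and field layout from the demand-paging header:

	#include <zephyr/kernel/mm/demand_paging.h>
	#include <zephyr/sys/printk.h>

	#ifdef CONFIG_DEMAND_PAGING_STATS
	void dump_paging_stats(void)
	{
		struct k_mem_paging_stats_t stats;

		k_mem_paging_stats_get(&stats);

		printk("page faults: %lu (irq locked %lu, unlocked %lu, in ISR %lu)\n",
		       stats.pagefaults.cnt, stats.pagefaults.irq_locked,
		       stats.pagefaults.irq_unlocked, stats.pagefaults.in_isr);
		printk("evictions: %lu clean, %lu dirty\n",
		       stats.eviction.clean, stats.eviction.dirty);
	}
	#endif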
1604 time_diff = k_cycle_get_32() - time_start; in do_eviction_select()
1631 * - k_mem_paging_eviction_select() metrics in do_page_fault()
1637 * We do re-enable interrupts during the page-in/page-out operation in do_page_fault()
1651 * scheduled during the page-in/out operation. Support for in do_page_fault()
1665 * and k_sched_lock() is equivalent to a no-op on SMP anyway. in do_page_fault()
1701 /* This if-block is to pin the page if it is in do_page_fault()
1718 LOG_DBG("evicting %p at 0x%lx", in do_page_fault()