Lines Matching +full:lock +full:- +full:regions
4 * SPDX-License-Identifier: Apache-2.0
17 #include <zephyr/linker/linker-defs.h>
31 * - A page frame is a page-sized physical memory region in RAM. It is a
37 * - A data page is a page-sized region of data. It may exist in a page frame,
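The two comment lines above draw the core distinction in this file: a page frame is a physical slot in RAM, while a data page is the content that may occupy one (or live in the backing store instead). A minimal standalone sketch of that relationship, assuming a flat array of frame descriptors over a contiguous RAM range; all names and constants here are illustrative, not the kernel's actual symbols:

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE       4096U          /* illustrative, stands in for CONFIG_MMU_PAGE_SIZE */
    #define RAM_PHYS_START  0x80000000UL   /* illustrative RAM base */
    #define RAM_NUM_FRAMES  1024U

    /* One descriptor per page-sized physical region ("page frame") */
    struct page_frame {
        uintptr_t va_and_flags;   /* mapped virtual address; low bits hold flags */
    };

    static struct page_frame frames[RAM_NUM_FRAMES];

    /* Index of the frame descriptor backing a physical address */
    static inline size_t phys_to_frame_idx(uintptr_t phys)
    {
        return (phys - RAM_PHYS_START) / PAGE_SIZE;
    }

    /* Physical base address covered by a frame descriptor */
    static inline uintptr_t frame_idx_to_phys(size_t idx)
    {
        return RAM_PHYS_START + (idx * PAGE_SIZE);
    }

With these constants, the descriptor for physical address 0x80003000 is frames[3], and frames[3] keeps describing that same frame even as different data pages move in and out of it.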
86 printk("-"); in page_frame_dump()
160 * +--------------+ <- K_MEM_VIRT_RAM_START
161 * | Undefined VM | <- May contain ancillary regions like x86_64's locore
162 * +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START)
168 * +--------------+ <- K_MEM_VM_FREE_START
173 * |..............| <- mapping_pos (grows downward as more mappings are made)
175 * +--------------+
177 * +--------------+
179 * +--------------+
181 * +--------------+ <- mappings start here
182 * | Reserved | <- special purpose virtual page(s) of size K_MEM_VM_RESERVED
183 * +--------------+ <- K_MEM_VIRT_RAM_END
199 #define Z_VIRT_REGION_END_ADDR (K_MEM_VIRT_RAM_END - K_MEM_VM_RESERVED)
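The layout diagram above runs from K_MEM_VIRT_RAM_START at the bottom to K_MEM_VIRT_RAM_END at the top, with K_MEM_VM_RESERVED page(s) carved off the very top and mappings growing downward below them; Z_VIRT_REGION_END_ADDR excludes that reserved area. A hedged sketch of the containment test that virt_region_free() appears to apply on lines 256-275, assuming Z_VIRT_REGION_START_ADDR is defined analogously as K_MEM_VM_FREE_START:

    /* Sketch: does [vaddr_u8, vaddr_u8 + size) fall entirely inside the
     * manageable virtual region? Assumes Z_VIRT_REGION_START_ADDR is
     * K_MEM_VM_FREE_START; both bounds behave as byte pointers.
     */
    static bool virt_region_contains(uint8_t *vaddr_u8, size_t size)
    {
        return (vaddr_u8 >= (uint8_t *)Z_VIRT_REGION_START_ADDR) &&
               ((vaddr_u8 + size - 1) < (uint8_t *)Z_VIRT_REGION_END_ADDR);
    }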
204 - (offset * CONFIG_MMU_PAGE_SIZE) - size; in virt_from_bitmap_offset()
210 - POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE; in virt_to_bitmap_offset()
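Lines 204 and 210 are the tails of a pair of conversion helpers. Because virtual pages are handed out from the top of the region downward, offset 0 in the allocation bitmap corresponds to the highest usable slot. A reconstruction of the pair, hedged from the visible fragments:

    /* Reconstructed sketch based on the fragments above: bitmap offsets
     * count page-sized slots downward from K_MEM_VIRT_RAM_END.
     */
    static void *virt_from_bitmap_offset(size_t offset, size_t size)
    {
        return UINT_TO_POINTER(POINTER_TO_UINT(K_MEM_VIRT_RAM_END)
                               - (offset * CONFIG_MMU_PAGE_SIZE) - size);
    }

    static size_t virt_to_bitmap_offset(void *vaddr, size_t size)
    {
        return (POINTER_TO_UINT(K_MEM_VIRT_RAM_END)
                - POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE;
    }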
217 /* There are regions where we should never map via in virt_region_init()
231 - POINTER_TO_UINT(K_MEM_VIRT_RAM_START); in virt_region_init()
256 && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR), in virt_region_free()
259 && ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) { in virt_region_free()
274 (((vaddr_u8 + size - 1) >= Z_VIRT_REGION_START_ADDR) && in virt_region_free()
275 ((vaddr_u8 + size - 1) < Z_VIRT_REGION_END_ADDR))) { in virt_region_free()
279 size_t adjusted_sz = adjusted_end - adjusted_start; in virt_region_free()
301 num_bits = (size + align - CONFIG_MMU_PAGE_SIZE) / CONFIG_MMU_PAGE_SIZE; in virt_region_alloc()
322 * +--------------+ <- K_MEM_VIRT_RAM_START in virt_region_alloc()
324 * +--------------+ <- K_MEM_KERNEL_VIRT_START (often == K_MEM_VIRT_RAM_START) in virt_region_alloc()
330 * +--------------+ <- K_MEM_VM_FREE_START in virt_region_alloc()
332 * +==============+ <- dest_addr in virt_region_alloc()
334 * |..............| <- aligned_dest_addr in virt_region_alloc()
339 * |..............| <- aligned_dest_addr + size in virt_region_alloc()
341 * +==============+ <- offset from K_MEM_VIRT_RAM_END == dest_addr + alloc_size in virt_region_alloc()
343 * +--------------+ in virt_region_alloc()
345 * +--------------+ in virt_region_alloc()
347 * +--------------+ <- K_MEM_VIRT_RAM_END in virt_region_alloc()
350 /* Free the two unused regions */ in virt_region_alloc()
352 aligned_dest_addr - dest_addr); in virt_region_alloc()
353 if (((dest_addr + alloc_size) - (aligned_dest_addr + size)) > 0) { in virt_region_alloc()
355 (dest_addr + alloc_size) - (aligned_dest_addr + size)); in virt_region_alloc()
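The diagram and the two virt_region_free() calls above show how virt_region_alloc() handles alignments larger than a page: over-allocate by (align - page size), pick the aligned sub-range inside the allocation, and hand the unused head and tail back to the region allocator. A compact sketch of that trim step, assuming dest_addr and alloc_size came from the bitmap allocator and are held as uintptr_t/size_t here:

    /* Sketch: carve an aligned sub-range out of an over-allocated region
     * and return the unused head and tail. Assumes
     * alloc_size = size + (align - CONFIG_MMU_PAGE_SIZE).
     */
    static void *virt_region_trim_sketch(uintptr_t dest_addr, size_t alloc_size,
                                         size_t size, size_t align)
    {
        uintptr_t aligned_dest_addr = ROUND_UP(dest_addr, align);

        if (aligned_dest_addr > dest_addr) {
            /* unused head below the aligned start */
            virt_region_free(UINT_TO_POINTER(dest_addr),
                             aligned_dest_addr - dest_addr);
        }
        if ((dest_addr + alloc_size) > (aligned_dest_addr + size)) {
            /* unused tail above the aligned end */
            virt_region_free(UINT_TO_POINTER(aligned_dest_addr + size),
                             (dest_addr + alloc_size) - (aligned_dest_addr + size));
        }

        return UINT_TO_POINTER(aligned_dest_addr);
    }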
379 * However, there are use-cases to consolidate free pages such that entire
406 z_free_page_count--; in free_page_frame_list_get()
410 pf->va_and_flags = 0; in free_page_frame_list_get()
422 sys_sfnode_init(&pf->node, K_MEM_PAGE_FRAME_FREE); in free_page_frame_list_put()
423 sys_sflist_append(&free_page_frame_list, &pf->node); in free_page_frame_list_put()
434 pf->va_and_flags = 0; in page_frame_free_locked()
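Free page frames sit on a flagged singly-linked list (sys_sflist), with the node flag word used to tag a frame as K_MEM_PAGE_FRAME_FREE. A reconstruction of the get/put pair consistent with the fragments at lines 406-423; the exact struct layout and the counter update in put are assumed:

    /* Reconstruction sketch: pop a frame off the free list, or NULL if empty. */
    static struct k_mem_page_frame *free_page_frame_list_get(void)
    {
        sys_sfnode_t *node = sys_sflist_get(&free_page_frame_list);

        if (node == NULL) {
            return NULL;
        }
        z_free_page_count--;

        struct k_mem_page_frame *pf =
            CONTAINER_OF(node, struct k_mem_page_frame, node);
        pf->va_and_flags = 0;    /* frame starts out unmapped, no flags */

        return pf;
    }

    /* Reconstruction sketch: tag a frame as free and append it to the list. */
    static void free_page_frame_list_put(struct k_mem_page_frame *pf)
    {
        sys_sfnode_init(&pf->node, K_MEM_PAGE_FRAME_FREE);
        sys_sflist_append(&free_page_frame_list, &pf->node);
        z_free_page_count++;     /* counter update assumed */
    }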
454 * This is uncommon, use-cases are for things like the in frame_mapped_set()
458 "non-pinned and already mapped to %p", in frame_mapped_set()
461 uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1; in frame_mapped_set()
464 pf->va_and_flags &= flags_mask; in frame_mapped_set()
465 pf->va_and_flags |= va | K_MEM_PAGE_FRAME_MAPPED; in frame_mapped_set()
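frame_mapped_set() packs the page-aligned virtual address and a few flag bits into the single va_and_flags word: since the address is page-aligned, its low log2(page size) bits are free to carry flags such as K_MEM_PAGE_FRAME_MAPPED. A sketch of the packing and the matching accessor (the helper names are illustrative):

    /* Low bits of a page-aligned address are reused as flag storage. */
    static inline void frame_set_va(struct k_mem_page_frame *pf, void *addr)
    {
        uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;
        uintptr_t va = POINTER_TO_UINT(addr) & ~flags_mask;

        pf->va_and_flags &= flags_mask;             /* keep the existing flags */
        pf->va_and_flags |= va | K_MEM_PAGE_FRAME_MAPPED;
    }

    static inline void *frame_get_va(const struct k_mem_page_frame *pf)
    {
        uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;

        return UINT_TO_POINTER(pf->va_and_flags & ~flags_mask);
    }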
477 * @retval -EFAULT if virtual address is not mapped
483 int ret = -EFAULT; in virt_to_page_frame()
513 * TODO: Add optional support for copy-on-write mappings to a zero page instead
516 * page-ins as memory is mapped and physical RAM or backing store storage will
524 bool lock = (flags & K_MEM_MAP_LOCK) != 0U; in map_anon_page() local
540 return -ENOMEM; in map_anon_page()
545 pf->va_and_flags = 0; in map_anon_page()
547 return -ENOMEM; in map_anon_page()
554 if (lock) { in map_anon_page()
559 if (IS_ENABLED(CONFIG_EVICTION_TRACKING) && (!lock)) { in map_anon_page()
564 LOG_DBG("memory mapping anon page %p -> 0x%lx", addr, phys); in map_anon_page()
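map_anon_page() takes a frame off the free list, maps it at the requested virtual address, and either pins it when K_MEM_MAP_LOCK is set or registers it for eviction tracking otherwise. A condensed, hedged sketch of that control flow; page_frame_to_phys() and page_frame_set_pinned() are placeholders for the kernel's own frame accessors, frame_set_va() refers to the packing sketch above, and the K_MEM_PERM_RW permission is only an example:

    /* Condensed sketch of the anonymous-page mapping path. */
    static int map_anon_page_sketch(void *addr, uint32_t flags)
    {
        bool lock = (flags & K_MEM_MAP_LOCK) != 0U;
        struct k_mem_page_frame *pf = free_page_frame_list_get();

        if (pf == NULL) {
            /* With demand paging enabled, a frame could instead be
             * reclaimed here by evicting another data page to the
             * backing store.
             */
            return -ENOMEM;
        }

        uintptr_t phys = page_frame_to_phys(pf);   /* placeholder helper */

        arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

        if (lock) {
            page_frame_set_pinned(pf);   /* placeholder: exempt from eviction */
        }
        frame_set_va(pf, addr);          /* record VA + MAPPED flag, see above */

        return 0;
    }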
635 LOG_DBG("memory mapping anon pages %p to %p unpaged", dst, pos-1); in k_mem_map_phys_guard()
646 * call k_mem_unmap(dst, pos - dst) in k_mem_map_phys_guard()
669 /* If we later implement mappings to a copy-on-write in k_mem_map_phys_guard()
693 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
703 ret = arch_page_phys_get(pos - CONFIG_MMU_PAGE_SIZE, NULL); in k_mem_unmap_phys_guard()
801 * have been unmapped. We just need to unmap the in-between in k_mem_unmap_phys_guard()
810 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
838 /* TODO: detect and handle paged-out memory as well */ in k_mem_update_flags()
858 ret = z_free_page_count - CONFIG_DEMAND_PAGING_PAGE_FRAMES_RESERVE; in k_mem_free_get()
901 __ASSERT(!(flags & K_MEM_DIRECT_MAP), "The direct-map is not enabled"); in k_mem_map_phys_bare()
906 __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_phys); in k_mem_map_phys_bare()
907 __ASSERT(aligned_phys < (aligned_phys + (aligned_size - 1)), in k_mem_map_phys_bare()
928 (uintptr_t)(K_MEM_VIRT_RAM_END - 1)) || in k_mem_map_phys_bare()
929 IN_RANGE(aligned_phys + aligned_size - 1, in k_mem_map_phys_bare()
931 (uintptr_t)(K_MEM_VIRT_RAM_END - 1))) { in k_mem_map_phys_bare()
935 size_t adjusted_sz = adjusted_end - adjusted_start; in k_mem_map_phys_bare()
954 ((uintptr_t)dest_addr + (size - 1)), in k_mem_map_phys_bare()
967 /* May re-visit this in the future, but for now running out of in k_mem_map_phys_bare()
988 __ASSERT(aligned_size != 0U, "0-length mapping at 0x%lx", aligned_virt); in k_mem_unmap_phys_bare()
989 __ASSERT(aligned_virt < (aligned_virt + (aligned_size - 1)), in k_mem_unmap_phys_bare()
1012 /* The actual mapped region must be page-aligned. Round down the in k_mem_region_align()
1016 addr_offset = addr - *aligned_addr; in k_mem_region_align()
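k_mem_region_align() widens an arbitrary region to page boundaries: round the start address down, remember how far the caller's address sits inside that first page, and round the size up so the original end stays covered. A sketch consistent with line 1016, using the ROUND_DOWN/ROUND_UP helpers from zephyr/sys/util.h:

    /* Sketch: align an arbitrary region to 'align'-sized boundaries and
     * return the offset of the original address within the aligned region.
     */
    size_t region_align_sketch(uintptr_t *aligned_addr, size_t *aligned_size,
                               uintptr_t addr, size_t size, size_t align)
    {
        size_t addr_offset;

        *aligned_addr = ROUND_DOWN(addr, align);
        addr_offset = addr - *aligned_addr;
        *aligned_size = ROUND_UP(size + addr_offset, align);

        return addr_offset;
    }

For example, aligning addr 0x12345678 with size 0x100 to a 0x1000 page yields aligned_addr 0x12345000, aligned_size 0x1000, and a returned offset of 0x678.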
1033 size_t pinned_size = pinned_end - pinned_start; in mark_linker_section_pinned()
1111 * currently tested with anonymously-mapped pages which are not in z_mem_manage_init()
1114 * We will need to setup linker regions for a subset of kernel in z_mem_manage_init()
1118 * handling, page-ins, etc. in z_mem_manage_init()
1126 * boot process. Will be un-pinned once boot process completes. in z_mem_manage_init()
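Per the comments above, z_mem_manage_init() pins the page frames backing the linker sections needed for page fault handling and page-ins, plus sections used only during boot, which are un-pinned again once the boot process completes. A rough sketch of pinning a physical range; all helpers here are placeholders:

    /* Sketch: pin (or unpin) every page frame backing a physical range,
     * e.g. the boot-time linker sections mentioned above.
     */
    static void pin_range_sketch(uintptr_t pinned_start, uintptr_t pinned_end,
                                 bool pin)
    {
        size_t pinned_size = pinned_end - pinned_start;

        for (size_t offset = 0; offset < pinned_size;
             offset += CONFIG_MMU_PAGE_SIZE) {
            struct k_mem_page_frame *pf =
                phys_to_page_frame(pinned_start + offset);   /* placeholder */

            if (pin) {
                page_frame_set_pinned(pf);                   /* placeholder */
            } else {
                page_frame_clear_pinned(pf);                 /* placeholder */
            }
        }
    }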
1239 time_diff = k_cycle_get_32() - time_start; in do_backing_store_page_in()
1270 time_diff = k_cycle_get_32() - time_start; in do_backing_store_page_out()
1307 * - Map page frame to scratch area if requested. This always is true if we're
1310 * - If mapped:
1311 * - obtain backing store location and populate location parameter
1312 * - Update page tables with location
1313 * - Mark page frame as busy
1315 * Returns -ENOMEM if the backing store is full
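A condensed sketch of the decision sequence listed above; the scratch-mapping and backing-store calls are placeholders for the kernel's real hooks, and the real function also distinguishes explicit evictions from page faults:

    /* Sketch of the prepare step for paging a frame in or out. */
    static int page_frame_prepare_sketch(struct k_mem_page_frame *pf,
                                         bool page_fault, bool dirty,
                                         uintptr_t *location)
    {
        if (dirty || page_fault) {
            /* The old contents must be read or written through the
             * scratch mapping, so map the frame there first.
             */
            map_to_scratch(pf);                        /* placeholder */
        }

        if (frame_is_mapped(pf)) {                     /* placeholder */
            if (backing_store_location_get(pf, location, page_fault) != 0) {
                return -ENOMEM;                        /* backing store is full */
            }
            page_tables_set_location(pf, *location);   /* placeholder */
            frame_set_busy(pf);                        /* placeholder: frame in flight */
        } else {
            /* Shouldn't happen unless this function is mis-used */
            __ASSERT(!dirty, "un-mapped page determined to be dirty");
        }

        return 0;
    }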
1332 * wasn't pre-populated with this data page. in page_frame_prepare_locked()
1352 return -ENOMEM; in page_frame_prepare_locked()
1360 /* Shouldn't happen unless this function is mis-used */ in page_frame_prepare_locked()
1361 __ASSERT(!dirty, "un-mapped page determined to be dirty"); in page_frame_prepare_locked()
1401 /* Un-mapped or already evicted. Nothing to do */ in do_mem_evict()
1469 * data page to page-in, see comments in that function. in k_mem_page_frame_evict()
1535 faulting_thread->paging_stats.pagefaults.cnt++; in paging_stats_faults_inc()
1538 faulting_thread->paging_stats.pagefaults.irq_unlocked++; in paging_stats_faults_inc()
1540 faulting_thread->paging_stats.pagefaults.irq_locked++; in paging_stats_faults_inc()
1551 faulting_thread->paging_stats.pagefaults.in_isr++; in paging_stats_faults_inc()
1569 faulting_thread->paging_stats.eviction.dirty++; in paging_stats_eviction_inc()
1571 faulting_thread->paging_stats.eviction.clean++; in paging_stats_eviction_inc()
1604 time_diff = k_cycle_get_32() - time_start; in do_eviction_select()
1631 * - k_mem_paging_eviction_select() metrics in do_page_fault()
1637 * We do re-enable interrupts during the page-in/page-out operation in do_page_fault()
1650 * On UP we lock the scheduler so that other threads are never in do_page_fault()
1651 * scheduled during the page-in/out operation. Support for in do_page_fault()
1665 * and k_sched_lock() is equivalent to a no-op on SMP anyway. in do_page_fault()
1701 /* This if-block is to pin the page if it is in do_page_fault()
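The comments above (lines 1637-1701) describe do_page_fault()'s concurrency strategy: interrupts are re-enabled for the slow page-in/page-out work, and on uniprocessor builds the scheduler is locked so no other thread runs while the page tables are in flux; per line 1665, k_sched_lock() is effectively a no-op on SMP. A hedged sketch of that bracket, with the actual paging work reduced to a placeholder:

    /* Sketch of the lock/unlock bracket around the paging work. */
    static bool page_fault_sketch(void *addr)
    {
        unsigned int key;
        bool ok;

        key = irq_lock();            /* fault entry: interrupts off */
        k_sched_lock();              /* UP: keep other threads parked */

        irq_unlock(key);             /* paging I/O may take a long time */
        ok = do_paging_work(addr);   /* placeholder: evict, page in, map */
        key = irq_lock();            /* re-establish atomicity */

        k_sched_unlock();
        irq_unlock(key);

        return ok;
    }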