Lines matching refs: CONFIG_MMU_PAGE_SIZE

137 (_pos) < ((uint8_t *)(_base) + (_size)); (_pos) += CONFIG_MMU_PAGE_SIZE)
141 (_pos) < ((uintptr_t)(_base) + (_size)); (_pos) += CONFIG_MMU_PAGE_SIZE)
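
The two matches above are the loop header of a page-stride iteration macro: one variant walks a byte pointer, the other a uintptr_t. A minimal sketch of the pattern, assuming a page-aligned _base and a page-multiple _size (the macro name and the PAGE_SIZE stand-in are illustrative, not the identifiers from kernel/mmu.c):

#include <stdint.h>

#define PAGE_SIZE 4096U /* stand-in for CONFIG_MMU_PAGE_SIZE */

/* Visit a region one page at a time; _pos takes each page's base address. */
#define VIRT_FOREACH(_base, _size, _pos)                        \
	for ((_pos) = (uint8_t *)(_base);                       \
	     (_pos) < ((uint8_t *)(_base) + (_size));           \
	     (_pos) += PAGE_SIZE)
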
194 CONFIG_KERNEL_VM_SIZE / CONFIG_MMU_PAGE_SIZE);
204 - (offset * CONFIG_MMU_PAGE_SIZE) - size; in virt_from_bitmap_offset()
210 - POINTER_TO_UINT(vaddr) - size) / CONFIG_MMU_PAGE_SIZE; in virt_to_bitmap_offset()
224 num_bits = K_MEM_VM_RESERVED / CONFIG_MMU_PAGE_SIZE; in virt_region_init()
233 num_bits /= CONFIG_MMU_PAGE_SIZE; in virt_region_init()
264 num_bits = size / CONFIG_MMU_PAGE_SIZE; in virt_region_free()
282 num_bits = adjusted_sz / CONFIG_MMU_PAGE_SIZE; in virt_region_free()
301 num_bits = (size + align - CONFIG_MMU_PAGE_SIZE) / CONFIG_MMU_PAGE_SIZE; in virt_region_alloc()
302 alloc_size = num_bits * CONFIG_MMU_PAGE_SIZE; in virt_region_alloc()
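
The virt_region_* matches above all follow one bookkeeping rule: the allocator tracks the virtual region with a bitmap in which one bit represents one page, with offsets measured downward from the end of the region (allocations grow toward lower addresses). A hedged sketch of the conversions these lines perform, with REGION_END and the helper names invented for illustration:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE  4096U         /* stand-in for CONFIG_MMU_PAGE_SIZE */
#define REGION_END 0xC0000000UL  /* illustrative end of the VM region */

/* Bitmap offset -> virtual address: bit 0 covers the pages just
 * below REGION_END, mirroring virt_from_bitmap_offset().
 */
static uintptr_t virt_from_offset(size_t offset, size_t size)
{
	return REGION_END - (offset * PAGE_SIZE) - size;
}

/* Virtual address -> bitmap offset: the inverse, as in
 * virt_to_bitmap_offset().
 */
static size_t virt_to_offset(uintptr_t vaddr, size_t size)
{
	return (REGION_END - vaddr - size) / PAGE_SIZE;
}

/* Pages (bits) needed for an allocation that must start 'align'-aligned:
 * virt_region_alloc() over-requests align - PAGE_SIZE extra bytes so an
 * aligned start always fits somewhere inside the run.
 */
static size_t alloc_bits(size_t size, size_t align)
{
	return (size + align - PAGE_SIZE) / PAGE_SIZE;
}
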
461 uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1; in frame_mapped_set()
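
The frame_mapped_set() match uses CONFIG_MMU_PAGE_SIZE - 1 as a mask: since mapped addresses are page-aligned, the low log2(page size) bits of an address are always zero and can be reused for per-frame flags. A generic sketch of that packing (the layout is assumed, not taken from the page frame struct):

#include <stdint.h>

#define PAGE_SIZE 4096U /* stand-in for CONFIG_MMU_PAGE_SIZE */

/* Pack flags into the always-zero low bits of a page-aligned address. */
static uintptr_t pack_addr_flags(uintptr_t vaddr, uintptr_t flags)
{
	uintptr_t flags_mask = PAGE_SIZE - 1;

	return (vaddr & ~flags_mask) | (flags & flags_mask);
}
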
552 arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags); in map_anon_page()
588 if ((size % CONFIG_MMU_PAGE_SIZE) != 0U) { in k_mem_map_phys_guard()
600 if (size_add_overflow(size, CONFIG_MMU_PAGE_SIZE * 2, &total_size)) { in k_mem_map_phys_guard()
607 dst = virt_region_alloc(total_size, CONFIG_MMU_PAGE_SIZE); in k_mem_map_phys_guard()
616 arch_mem_unmap(dst, CONFIG_MMU_PAGE_SIZE); in k_mem_map_phys_guard()
617 arch_mem_unmap(dst + CONFIG_MMU_PAGE_SIZE + size, in k_mem_map_phys_guard()
618 CONFIG_MMU_PAGE_SIZE); in k_mem_map_phys_guard()
621 dst += CONFIG_MMU_PAGE_SIZE; in k_mem_map_phys_guard()
633 CONFIG_MMU_PAGE_SIZE, flags); in k_mem_map_phys_guard()
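
Matches 588 through 633 trace the guard-page layout in k_mem_map_phys_guard(): reject sizes that are not page multiples, check that size plus two guard pages does not overflow, reserve the enlarged virtual region, unmap the first and last page, and return a pointer past the leading guard. A self-contained POSIX analogue of the same layout (illustrative userspace code, not the Zephyr implementation):

#define _DEFAULT_SOURCE /* for MAP_ANONYMOUS on glibc */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *map_with_guards(size_t size, size_t page)
{
	size_t total;
	uint8_t *base;

	if ((size % page) != 0U) {
		return NULL;  /* size must be a page multiple */
	}
	if (size > SIZE_MAX - 2 * page) {
		return NULL;  /* size + guards would overflow */
	}
	total = size + 2 * page;

	base = mmap(NULL, total, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		return NULL;
	}

	/* Guard pages at both ends: any stray access faults immediately. */
	mprotect(base, page, PROT_NONE);
	mprotect(base + page + size, page, PROT_NONE);

	return base + page;  /* hand back the interior */
}
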
688 __ASSERT_NO_MSG(POINTER_TO_UINT(addr) >= CONFIG_MMU_PAGE_SIZE); in k_mem_unmap_phys_guard()
693 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
694 k_mem_assert_virtual_region(pos, size + (CONFIG_MMU_PAGE_SIZE * 2)); in k_mem_unmap_phys_guard()
703 ret = arch_page_phys_get(pos - CONFIG_MMU_PAGE_SIZE, NULL); in k_mem_unmap_phys_guard()
734 arch_mem_unmap(pos, CONFIG_MMU_PAGE_SIZE); in k_mem_unmap_phys_guard()
785 arch_mem_unmap(pos, CONFIG_MMU_PAGE_SIZE); in k_mem_unmap_phys_guard()
810 pos = (uint8_t *)addr - CONFIG_MMU_PAGE_SIZE; in k_mem_unmap_phys_guard()
811 total_size = size + (CONFIG_MMU_PAGE_SIZE * 2); in k_mem_unmap_phys_guard()
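
The k_mem_unmap_phys_guard() matches show the inverse walk: step back one page over the leading guard, then treat size plus both guards as the span to validate and release. Continuing the POSIX sketch above:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Release a region returned by map_with_guards(), guards included. */
static void unmap_with_guards(void *addr, size_t size, size_t page)
{
	uint8_t *pos = (uint8_t *)addr - page;  /* back over the guard */
	size_t total = size + 2 * page;         /* interior + both guards */

	munmap(pos, total);
}
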
867 return ret * (size_t)CONFIG_MMU_PAGE_SIZE; in k_mem_free_get()
882 return CONFIG_MMU_PAGE_SIZE; in virt_region_align()
905 CONFIG_MMU_PAGE_SIZE); in k_mem_map_phys_bare()
937 num_bits = adjusted_sz / CONFIG_MMU_PAGE_SIZE; in k_mem_map_phys_bare()
987 CONFIG_MMU_PAGE_SIZE); in k_mem_unmap_phys_bare()
1030 CONFIG_MMU_PAGE_SIZE); in mark_linker_section_pinned()
1032 CONFIG_MMU_PAGE_SIZE); in mark_linker_section_pinned()
1067 arch_mem_map(addr, location, CONFIG_MMU_PAGE_SIZE, flags); in z_paging_ondemand_section_map()
1069 virt_to_bitmap_offset(addr, CONFIG_MMU_PAGE_SIZE)); in z_paging_ondemand_section_map()
1076 arch_mem_map(addr, location, CONFIG_MMU_PAGE_SIZE, flags); in z_paging_ondemand_section_map()
1078 virt_to_bitmap_offset(addr, CONFIG_MMU_PAGE_SIZE)); in z_paging_ondemand_section_map()
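
The z_paging_ondemand_section_map() matches pair each single-page arch_mem_map() call with marking that page's slot in the region bitmap, so the allocator knows the address is taken. A sketch of the per-page step; arch_mem_map() and virt_to_bitmap_offset() follow the call shapes visible in the listing, while bitmap_set_used() is a hypothetical stand-in for the bitmap setter:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096U /* stand-in for CONFIG_MMU_PAGE_SIZE */

extern void arch_mem_map(void *virt, uintptr_t phys, size_t size,
			 uint32_t flags);
extern size_t virt_to_bitmap_offset(void *addr, size_t size);
extern void bitmap_set_used(size_t offset); /* hypothetical */

static void map_ondemand_page(void *addr, uintptr_t location, uint32_t flags)
{
	/* Map one page, then record it as occupied in the region bitmap. */
	arch_mem_map(addr, location, PAGE_SIZE, flags);
	bitmap_set_used(virt_to_bitmap_offset(addr, PAGE_SIZE));
}
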
1208 memset(K_MEM_SCRATCH_PAGE, 0, CONFIG_MMU_PAGE_SIZE); in do_backing_store_page_in()
1296 for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) { in virt_region_foreach()
1443 for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) { in k_mem_page_out()