Lines Matching +full:static +full:- +full:address

/* SPDX-License-Identifier: Apache-2.0 */

#include <zephyr/linker/linker-defs.h>
/** Start address of physical memory. */

/** End address (exclusive) of physical memory. */

/** Start address of virtual memory. */

/** End address (exclusive) of virtual memory. */

/** Boot-time virtual start address of the kernel image. */

/** Boot-time virtual end address of the kernel image. */

/** Boot-time virtual address space size of the kernel image. */
#define K_MEM_KERNEL_VIRT_SIZE (K_MEM_KERNEL_VIRT_END - K_MEM_KERNEL_VIRT_START)
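/* Apart from K_MEM_KERNEL_VIRT_SIZE, the definitions behind the comments
 * above fall outside the matched lines. A sketch of how these bounds
 * typically relate (bodies assumed, not verbatim from this header; the
 * kernel image bounds come from symbols provided by linker-defs.h):
 */
#define K_MEM_PHYS_RAM_START    ((uintptr_t)CONFIG_SRAM_BASE_ADDRESS)          /* assumed */
#define K_MEM_PHYS_RAM_END      (K_MEM_PHYS_RAM_START + KB(CONFIG_SRAM_SIZE))  /* assumed */
#define K_MEM_VIRT_RAM_START    ((uint8_t *)CONFIG_KERNEL_VM_BASE)             /* assumed */
#define K_MEM_VIRT_RAM_END      (K_MEM_VIRT_RAM_START + CONFIG_KERNEL_VM_SIZE) /* assumed */
#define K_MEM_KERNEL_VIRT_START ((uint8_t *)&z_mapped_start[0])                /* assumed */
#define K_MEM_KERNEL_VIRT_END   ((uint8_t *)&z_mapped_end[0])                  /* assumed */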
/**
 * @brief Offset for translating between static physical and virtual addresses.
 */
#define K_MEM_VM_OFFSET \
	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
	 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))

/**
 * @brief Get physical address from virtual address for boot RAM mappings.
 *
 * @param virt Virtual address.
 *
 * @return Physical address.
 */
#define K_MEM_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)(virt)) - K_MEM_VM_OFFSET))
/**
 * @brief Get virtual address from physical address for boot RAM mappings.
 *
 * @param phys Physical address.
 *
 * @return Virtual address.
 */
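/* The macro body itself is not among the matched lines; by symmetry with
 * K_MEM_BOOT_VIRT_TO_PHYS() above, it is presumably:
 */
#define K_MEM_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)(phys)) + K_MEM_VM_OFFSET)) /* assumed */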
/**
 * @brief Start address of unused, available virtual addresses.
 *
 * This is the start address of the virtual memory region where
 * addresses can be allocated for memory mappings. Its value depends on
 * whether all physical memory is also linearly mapped into the virtual
 * address space:
 *
 * - If it is, the free region begins where that linear mapping of
 *   physical memory ends.
 *
 * - If it is not, K_MEM_VM_FREE_START is the same as K_MEM_KERNEL_VIRT_END,
 *   which is the end of the kernel image (see the sketch below).
 */
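/* The conditional definition itself is elided by the search; a sketch of
 * the shape it takes (Kconfig symbol name and exact values assumed):
 */
#ifdef CONFIG_ARCH_MAPS_ALL_RAM /* assumed option name */
/* Free VM starts where the linear map of all physical RAM ends */
#define K_MEM_VM_FREE_START ((uint8_t *)CONFIG_KERNEL_VM_BASE + KB(CONFIG_SRAM_SIZE))
#else
/* Free VM starts right after the kernel image */
#define K_MEM_VM_FREE_START K_MEM_KERNEL_VIRT_END
#endif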
/*
 * Macros and data structures for physical page frame accounting, plus APIs
 * for use by eviction and backing store algorithms. This code
 * is otherwise not application-facing.
 */
/*
 * Page frame flag bits, stored alongside the mapped virtual address.
 *
 * Requirements:
 * - K_MEM_PAGE_FRAME_FREE must be one of the possible sfnode flag bits
 * - All bit values must be lower than CONFIG_MMU_PAGE_SIZE
 *
 * A bit assignment satisfying these rules is sketched after the flag
 * descriptions below.
 */
/** This physical page is mapped to some virtual memory address. */

/** This page frame is currently involved in a page-in/out operation. */
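/* The individual flag definitions are elided by the search; an assignment
 * consistent with the rules above (exact values assumed). sys_sfnode_t can
 * only carry flags in the low-order bits of an aligned pointer, so FREE
 * sits at bit 0, and every value stays below CONFIG_MMU_PAGE_SIZE.
 */
#define K_MEM_PAGE_FRAME_FREE     BIT(0) /* assumed: fits in the sfnode flag bits */
#define K_MEM_PAGE_FRAME_RESERVED BIT(1) /* assumed */
#define K_MEM_PAGE_FRAME_MAPPED   BIT(2) /* assumed */
#define K_MEM_PAGE_FRAME_BUSY     BIT(3) /* assumed */
#define K_MEM_PAGE_FRAME_BACKED   BIT(4) /* assumed */
#define K_MEM_PAGE_FRAME_PINNED   BIT(5) /* assumed */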
/* If mapped, the K_MEM_PAGE_FRAME_* flags and the virtual address
 * this page is mapped to share a single word (va_and_flags below).
 */

/* Backing store and eviction algorithms may both
 * require additional per-frame custom data for accounting purposes.
 */
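/* The struct declaration is mostly elided by the search; a sketch of the
 * shape implied by the accessors below (member layout assumed): one
 * pointer-sized word holds the page-aligned virtual address plus the flag
 * bits, and doubles as a free-list node while the frame is unused.
 */
#include <zephyr/sys/sflist.h> /* assumed: for sys_sfnode_t */

struct k_mem_page_frame {
	union {
		uintptr_t va_and_flags; /* mapped VA | K_MEM_PAGE_FRAME_* bits */
		sys_sfnode_t node;      /* free-list linkage when unused */
	};
};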
static inline bool k_mem_page_frame_is_free(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_FREE) != 0U;
}

static inline bool k_mem_page_frame_is_pinned(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_PINNED) != 0U;
}

static inline bool k_mem_page_frame_is_reserved(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_RESERVED) != 0U;
}

static inline bool k_mem_page_frame_is_mapped(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_MAPPED) != 0U;
}

static inline bool k_mem_page_frame_is_busy(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_BUSY) != 0U;
}

static inline bool k_mem_page_frame_is_backed(struct k_mem_page_frame *pf)
{
	return (pf->va_and_flags & K_MEM_PAGE_FRAME_BACKED) != 0U;
}
/* Evictable: mapped to a data page, not pinned, and not mid page-in/out.
 * (Body elided by the search; reconstructed from the predicates above.)
 */
static inline bool k_mem_page_frame_is_evictable(struct k_mem_page_frame *pf)
{
	return (!k_mem_page_frame_is_free(pf) &&
		k_mem_page_frame_is_mapped(pf) &&
		!k_mem_page_frame_is_pinned(pf) &&
		!k_mem_page_frame_is_busy(pf));
}
/* If true, the page frame is not being used for anything (no flags set,
 * no mapping) and is available to be mapped.
 */
static inline bool k_mem_page_frame_is_available(struct k_mem_page_frame *page)
{
	return page->va_and_flags == 0U;
}
static inline void k_mem_page_frame_set(struct k_mem_page_frame *pf, uint8_t flags)
{
	pf->va_and_flags |= flags;
}

static inline void k_mem_page_frame_clear(struct k_mem_page_frame *pf, uint8_t flags)
{
	/* Widen first so the bit inversion happens at pointer width rather
	 * than on an int-promoted uint8_t.
	 */
	uintptr_t wide_flags = flags;

	pf->va_and_flags &= ~wide_flags;
}
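/* A minimal usage sketch of the accessors above. The helper name is
 * illustrative, not part of this header; real callers obtain frames via
 * k_mem_phys_to_page_frame() below.
 */
static inline void example_pin_frame(struct k_mem_page_frame *pf)
{
	if (!k_mem_page_frame_is_pinned(pf)) {
		k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
	}
}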
static inline void k_mem_assert_phys_aligned(uintptr_t phys)
{
	__ASSERT((phys & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U,
		 "physical address 0x%lx is not page-aligned", phys);
}
/* One element per physical RAM page; see k_mem_phys_to_page_frame() below */
extern struct k_mem_page_frame k_mem_page_frames[];

static inline uintptr_t k_mem_page_frame_to_phys(struct k_mem_page_frame *pf)
{
	return (uintptr_t)((pf - k_mem_page_frames) * CONFIG_MMU_PAGE_SIZE) +
	       K_MEM_PHYS_RAM_START;
}
/* Presumes there is but one mapping in the virtual address space */
static inline void *k_mem_page_frame_to_virt(struct k_mem_page_frame *pf)
{
	uintptr_t flags_mask = CONFIG_MMU_PAGE_SIZE - 1;

	return (void *)(pf->va_and_flags & ~flags_mask);
}
static inline bool k_mem_is_page_frame(uintptr_t phys)
{
	k_mem_assert_phys_aligned(phys);
	return IN_RANGE(phys, (uintptr_t)K_MEM_PHYS_RAM_START,
			(uintptr_t)(K_MEM_PHYS_RAM_END - 1));
}
static inline struct k_mem_page_frame *k_mem_phys_to_page_frame(uintptr_t phys)
{
	__ASSERT(k_mem_is_page_frame(phys),
		 "0x%lx not an SRAM physical address", phys);

	return &k_mem_page_frames[(phys - K_MEM_PHYS_RAM_START) /
				  CONFIG_MMU_PAGE_SIZE];
}
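/* Usage sketch: round-tripping between the three views of a page (physical
 * address, page frame, mapped virtual address). The helper name is
 * illustrative only.
 */
static inline void *example_phys_to_mapped_virt(uintptr_t phys)
{
	if (!k_mem_is_page_frame(phys)) {
		return NULL;
	}

	struct k_mem_page_frame *pf = k_mem_phys_to_page_frame(phys);

	__ASSERT(k_mem_page_frame_to_phys(pf) == phys, "round trip broken");

	return k_mem_page_frame_is_mapped(pf) ? k_mem_page_frame_to_virt(pf) : NULL;
}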
static inline void k_mem_assert_virtual_region(uint8_t *addr, size_t size)
{
	__ASSERT(IN_RANGE((uintptr_t)addr,
			  (uintptr_t)K_MEM_VIRT_RAM_START,
			  ((uintptr_t)K_MEM_VIRT_RAM_END - 1)) &&
		 IN_RANGE(((uintptr_t)addr + size - 1),
			  (uintptr_t)K_MEM_VIRT_RAM_START,
			  ((uintptr_t)K_MEM_VIRT_RAM_END - 1)),
		 "invalid virtual address region %p (%zu)", addr, size);
}
/**
 * @brief Pretty-print page frame information for all page frames.
 *
 * Debug function: pretty-prints page frame information for all frames,
 * concisely.
 */
/* We reserve a virtual page as a scratch area for page-ins/outs at the end
 * of the address space, i.e. the last page of the kernel's virtual region.
 * (Macro name assumed; only the middle of the definition matched the search.)
 */
#define K_MEM_SCRATCH_PAGE ((void *)((uintptr_t)CONFIG_KERNEL_VM_BASE + \
				     (uintptr_t)CONFIG_KERNEL_VM_SIZE - \
				     CONFIG_MMU_PAGE_SIZE))
/**
 * Free a page frame physical address by evicting its contents
 *
 * If the indicated page frame contains a data page, that page is evicted
 * to the backing store, and the frame is then
 * marked as available for mappings or page-ins.
 *
 * Depending on configuration, this function may not be
 * called by ISRs, as the backing store may be in-use.
 *
 * @param phys Page frame physical address
 * @retval 0 Success
 * @retval -ENOMEM Insufficient backing store space
 */
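/* Usage sketch (prototype assumed from the documentation above): evicting
 * every frame in a physical range, e.g. to free up a memory bank. The
 * helper name is illustrative only.
 */
int k_mem_page_frame_evict(uintptr_t phys); /* assumed prototype */

static inline int example_evict_range(uintptr_t start, size_t size)
{
	for (uintptr_t phys = start; phys < start + size;
	     phys += CONFIG_MMU_PAGE_SIZE) {
		int ret = k_mem_page_frame_evict(phys);

		if (ret != 0) {
			return ret; /* e.g. -ENOMEM: backing store is full */
		}
	}

	return 0;
}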
/**
 * Handle a page fault for a virtual data page
 *
 * If the fault is valid, the kernel obtains a page frame, populates it with
 * the data page from the backing store, updates the page tables, and
 * returns so that the faulting instruction may be
 * re-tried.
 *
 * Depending on configuration, the fault handler may execute with interrupts
 * enabled and the faulting thread
 * preemptible. Races to page-in will be appropriately handled by the kernel.
 *
 * @param addr Faulting virtual address
 *
 * @retval true Page fault successfully handled; the faulting instruction
 *              may be re-tried.
 * @retval false This page fault was from an un-mapped page, should
 *               be treated as an error, and not re-tried.
 */
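/* Usage sketch (prototype assumed from the documentation above): how an
 * architecture's data-abort path might hand a fault to the kernel.
 */
bool k_mem_page_fault(void *addr); /* assumed prototype */

static inline void example_arch_handle_data_abort(void *fault_addr)
{
	if (!k_mem_page_fault(fault_addr)) {
		/* Un-mapped page: treat as a fatal error, do not re-try */
		k_panic();
	}
	/* Otherwise, return and re-try the faulting instruction */
}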