/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H

#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/arm_mem.h>
#endif

/**
 * @brief Memory Management
 * @defgroup memory_management Memory Management
 * @ingroup os_services
 * @{
 * @}
 */

/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching. Most drivers want this. */
#define K_MEM_CACHE_NONE	2

/** Write-through caching. Used by certain drivers. */
#define K_MEM_CACHE_WT		1

/** Full write-back caching. Any mapped RAM wants this. */
#define K_MEM_CACHE_WB		0

/*
 * ARM64-specific flags are defined in arch/arm64/arm_mem.h;
 * take care to avoid conflicts when updating these flags.
 */

/** Reserved bits for cache modes in k_map() flags argument */
#define K_MEM_CACHE_MASK	(BIT(3) - 1)

/*
 * Region permission attributes. Default is read-only, no user, no exec
 */

/** Region will have read/write access (and not read-only) */
#define K_MEM_PERM_RW		BIT(3)

/** Region will be executable (normally forbidden) */
#define K_MEM_PERM_EXEC		BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define K_MEM_PERM_USER		BIT(5)

/*
 * Region mapping behaviour attributes
 */

/** Region will be mapped with identical (1:1) virtual and physical addresses */
#define K_MEM_DIRECT_MAP	BIT(6)
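
/*
 * Illustrative flag combinations (not part of the original header; the
 * variable names are hypothetical). A caching mode is always combined
 * with zero or more permission bits:
 *
 *	// MMIO registers for a driver: uncached, read/write, kernel-only
 *	uint32_t mmio_flags = K_MEM_CACHE_NONE | K_MEM_PERM_RW;
 *
 *	// Anonymous RAM shared with user threads: cached, read/write
 *	uint32_t shared_flags = K_MEM_CACHE_WB | K_MEM_PERM_RW | K_MEM_PERM_USER;
 */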

/*
 * This is the offset to subtract from a virtual address mapped in the
 * kernel's permanent mapping of RAM, to obtain its physical address.
 *
 *     virt_addr = phys_addr + Z_MEM_VM_OFFSET
 *
 * This only works for virtual addresses within the interval
 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
 *
 * These macros are intended for assembly, linker code, and static initializers.
 * Use with care.
 *
 * Note that when demand paging is active, these will only work with page
 * frames that are pinned to their virtual mapping at boot.
 *
 * TODO: This will likely need to move to an arch API or need additional
 * constraints defined.
 */
#ifdef CONFIG_MMU
#define Z_MEM_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
#define Z_MEM_VM_OFFSET	0
#endif

#define Z_MEM_PHYS_ADDR(virt)	((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys)	((phys) + Z_MEM_VM_OFFSET)
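
/*
 * Worked example (hypothetical configuration values, for illustration
 * only): with CONFIG_SRAM_BASE_ADDRESS = 0x10000000, CONFIG_KERNEL_VM_BASE
 * = 0x80000000 and both offsets zero, Z_MEM_VM_OFFSET is 0x70000000, so:
 *
 *	Z_MEM_VIRT_ADDR(0x10001000) == 0x80001000
 *	Z_MEM_PHYS_ADDR(0x80001000) == 0x10001000
 */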

#if Z_MEM_VM_OFFSET != 0
#define Z_VM_KERNEL 1
#ifdef CONFIG_XIP
#error "XIP and a virtual memory kernel are not allowed"
#endif
#endif

#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <zephyr/sys/__assert.h>

struct k_mem_paging_stats_t {
#ifdef CONFIG_DEMAND_PAGING_STATS
	struct {
		/** Number of page faults */
		unsigned long			cnt;

		/** Number of page faults with IRQ locked */
		unsigned long			irq_locked;

		/** Number of page faults with IRQ unlocked */
		unsigned long			irq_unlocked;

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
		/** Number of page faults while in ISR */
		unsigned long			in_isr;
#endif
	} pagefaults;

	struct {
		/** Number of clean pages selected for eviction */
		unsigned long			clean;

		/** Number of dirty pages selected for eviction */
		unsigned long			dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};

struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Counts for each bin in timing histogram */
	unsigned long	counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/* Bounds for the bins in timing histogram,
	 * excluding the first and last (hence, NUM_BINS - 1 meaningful
	 * entries).
	 */
	unsigned long	bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};

/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
static inline uintptr_t z_mem_phys_addr(void *virt)
{
	uintptr_t addr = (uintptr_t)virt;

#ifdef CONFIG_MMU
	__ASSERT((addr >= CONFIG_KERNEL_VM_BASE) &&
		 (addr < (CONFIG_KERNEL_VM_BASE +
			  (CONFIG_KERNEL_VM_SIZE))),
		 "address %p not in permanent mappings", virt);
#else
	/* Should be identity-mapped */
	__ASSERT((addr >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (addr < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM",
		 (unsigned long)addr);
#endif /* CONFIG_MMU */

	/* TODO add assertion that this page is pinned to boot mapping,
	 * the above checks won't be sufficient with demand paging
	 */

	return Z_MEM_PHYS_ADDR(addr);
}

/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
static inline void *z_mem_virt_addr(uintptr_t phys)
{
	__ASSERT((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
		 (phys < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM", (unsigned long)phys);

	/* TODO add assertion that this page frame is pinned to boot mapping,
	 * the above check won't be sufficient with demand paging
	 */

	return (void *)Z_MEM_VIRT_ADDR(phys);
}

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Map a physical memory region into the kernel's virtual address space
 *
 * This function is intended for mapping memory-mapped I/O regions into
 * the virtual address space. Given a physical address and a size, return a
 * linear address representing the base of where the physical region is mapped
 * in the virtual address space for the Zephyr kernel.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel. It will choose the virtual address and return it to
 * the caller.
 *
 * Portable code should never assume that the physical and virtual
 * addresses will be equal.
 *
 * Caching and access properties are controlled by the 'flags' parameter.
 * Unused bits in 'flags' are reserved for future expansion.
 * A caching mode must be selected. By default, the region is read-only
 * with user access and code execution forbidden. This policy is changed
 * by passing K_MEM_CACHE_* and K_MEM_PERM_* macros in the 'flags' parameter.
 *
 * If there is insufficient virtual address space for the mapping this will
 * generate a kernel panic.
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to map system RAM page
 * frames. It may conflict with anonymous memory mappings and demand paging
 * and produce undefined behavior. Do not use this for RAM unless you know
 * exactly what you are doing. If you need a chunk of memory, use k_mem_map().
 * If you need a contiguous buffer of physical memory, statically declare it
 * and pin it at build time; it will be mapped when the system boots.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt_ptr [out] Output virtual address storage location
 * @param phys Physical address base of the memory region
 * @param size Size of the memory region
 * @param flags Caching mode and access flags, see K_MEM_CACHE_* and
 *              K_MEM_PERM_* macros
 */
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
		uint32_t flags);
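
/*
 * Usage sketch (illustrative only; the physical base address, size and
 * register offset are hypothetical values for some device):
 *
 *	uint8_t *regs;
 *
 *	z_phys_map(&regs, 0x40001000UL, 0x1000,
 *		   K_MEM_CACHE_NONE | K_MEM_PERM_RW);
 *	sys_write32(1U, (mem_addr_t)(regs + MY_DEV_CTRL_OFFSET));
 */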

/**
 * Unmap a virtual memory region from kernel's virtual address space.
 *
 * This function is intended to be used by drivers and early boot routines
 * where temporary memory mappings need to be made. This allows these
 * memory mappings to be discarded once they are no longer needed.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel.
 *
 * This will align the input parameters to page boundaries so that
 * this can be used with the virtual address as returned by
 * z_phys_map().
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to unmap anything other
 * than regions mapped with z_phys_map(). It may conflict with anonymous
 * memory mappings and demand paging and produce undefined behavior.
 * Do not use this unless you know exactly what you are doing.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Starting address of the virtual address region to be unmapped.
 * @param size Size of the virtual address region
 */
void z_phys_unmap(uint8_t *virt, size_t size);
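
/*
 * Usage sketch (illustrative): releasing the temporary MMIO mapping from
 * the z_phys_map() sketch above once it is no longer needed:
 *
 *	z_phys_unmap(regs, 0x1000);
 */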

/*
 * k_mem_map() control flags
 */

/**
 * @brief The mapped region is not guaranteed to be zeroed.
 *
 * This may improve performance. The associated page frames may contain
 * indeterminate data, zeroes, or even sensitive information.
 *
 * This may not be used with K_MEM_PERM_USER as there are no circumstances
 * where this is safe.
 */
#define K_MEM_MAP_UNINIT	BIT(16)

/**
 * Region will be pinned in memory and never paged
 *
 * Such memory is guaranteed to never produce a page fault due to page-outs
 * or copy-on-write once the mapping call has returned. Physical page frames
 * will be pre-fetched as necessary and pinned.
 */
#define K_MEM_MAP_LOCK		BIT(17)

/**
 * Return the amount of free memory available
 *
 * The returned value will reflect how many free RAM page frames are available.
 * If demand paging is enabled, it may still be possible to allocate more.
 *
 * The information reported by this function may go stale immediately if
 * concurrent memory mappings or page-ins take place.
 *
 * @return Free physical RAM, in bytes
 */
size_t k_mem_free_get(void);

/**
 * Map anonymous memory into Zephyr's address space
 *
 * This function effectively increases the data space available to Zephyr.
 * The kernel will choose a base virtual address and return it to the caller.
 * The memory will have access permissions for all contexts set per the
 * provided flags argument.
 *
 * If user thread access control needs to be managed in any way, do not set
 * the K_MEM_PERM_USER flag here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here will allow all user threads to access this memory,
 * which is usually undesirable.
 *
 * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
 *
 * The mapped region is not guaranteed to be physically contiguous in memory.
 * Physically contiguous buffers should be allocated statically and pinned
 * at build time.
 *
 * Pages mapped in this way have write-back cache settings.
 *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address for re-mapping purposes, must be
 * page-aligned.
 *
 * Note that the allocation includes two guard pages immediately before
 * and after the requested region. The total size of the allocation will be
 * the requested size plus the size of these two guard pages.
 *
 * Various K_MEM_MAP_* flags alter the behavior of this function; see the
 * documentation for those flags.
 *
 * @param size Size of the memory mapping. This must be page-aligned.
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @return The mapped memory location, or NULL if insufficient virtual address
 *         space, insufficient physical memory to establish the mapping,
 *         or insufficient memory for paging structures.
 */
void *k_mem_map(size_t size, uint32_t flags);
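
/*
 * Usage sketch (illustrative): map one page of zeroed, kernel-only RAM,
 * then release it again with k_mem_unmap() (declared below) when done:
 *
 *	void *buf = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
 *
 *	if (buf == NULL) {
 *		// out of virtual address space or physical memory
 *	}
 *	...
 *	k_mem_unmap(buf, CONFIG_MMU_PAGE_SIZE);
 */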

/**
 * Un-map mapped memory
 *
 * This removes a memory mapping for the provided page-aligned region.
 * Associated page frames will be freed and the kernel may re-use the
 * associated virtual address region. Any paged out data pages may be
 * discarded.
 *
 * Calling this function on a region which was not mapped to begin with is
 * undefined behavior.
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 */
void k_mem_unmap(void *addr, size_t size);

/**
 * Given an arbitrary region, provide an aligned region that covers it
 *
 * The returned region will have both its base address and size aligned
 * to the provided alignment value.
 *
 * @param aligned_addr [out] Aligned address
 * @param aligned_size [out] Aligned region size
 * @param addr Region base address
 * @param size Region size
 * @param align What to align the address and size to
 * @return The offset between aligned_addr and addr
 */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align);
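
/*
 * Worked example (illustrative, assuming a 0x1000-byte alignment): aligning
 * the region [0x1234, 0x1234 + 0x100) yields aligned_addr == 0x1000,
 * aligned_size == 0x1000 and a returned offset of 0x234:
 *
 *	uintptr_t aligned_addr;
 *	size_t aligned_size;
 *	size_t offset = k_mem_region_align(&aligned_addr, &aligned_size,
 *					   0x1234, 0x100, 0x1000);
 */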

/**
 * @defgroup demand_paging Demand Paging
 * @ingroup memory_management
 */
/**
 * @defgroup mem-demand-paging Demand Paging APIs
 * @ingroup demand_paging
 * @{
 */

/**
 * Evict a page-aligned virtual memory region to the backing store
 *
 * Useful if it is known that a memory region will not be used for some time.
 * All the data pages within the specified region will be evicted to the
 * backing store if they weren't already, with their associated page frames
 * marked as available for mappings or page-ins.
 *
 * None of the associated page frames mapped to the provided region should
 * be pinned.
 *
 * Note that there are no guarantees on how long these pages will stay
 * evicted; they could take page faults immediately.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 * @retval 0 Success
 * @retval -ENOMEM Insufficient space in backing store to satisfy request.
 *         The region may be partially paged out.
 */
int k_mem_page_out(void *addr, size_t size);

/**
 * Load a virtual data region into memory
 *
 * After the function completes, all the page frames associated with this
 * region will be paged in. However, they are not guaranteed to stay there.
 * This is useful if the region is known to be used soon.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_page_in(void *addr, size_t size);

/**
 * Pin an aligned virtual data region, paging in as necessary
 *
 * After the function completes, all the page frames associated with this
 * region will be resident in memory and pinned such that they stay that way.
 * This is a stronger version of k_mem_page_in().
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_pin(void *addr, size_t size);

/**
 * Un-pin an aligned virtual data region
 *
 * After the function completes, all the page frames associated with this
 * region will no longer be marked as pinned. This does not evict the region;
 * follow this with k_mem_page_out() if you need that.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_unpin(void *addr, size_t size);
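
/*
 * Usage sketch (illustrative; `buf` and `BUF_SIZE` are hypothetical and
 * must be page-aligned): keep a latency-sensitive buffer resident while it
 * is in active use, then allow and request its eviction afterwards:
 *
 *	k_mem_pin(buf, BUF_SIZE);
 *	// ... time-critical work, no page faults possible on buf ...
 *	k_mem_unpin(buf, BUF_SIZE);
 *	(void)k_mem_page_out(buf, BUF_SIZE);
 */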

/**
 * Get the paging statistics since system startup
 *
 * This populates the paging statistics struct being passed in
 * as argument.
 *
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);

struct k_thread;
/**
 * Get the paging statistics since system startup for a thread
 *
 * This populates the paging statistics struct being passed in
 * as argument for a particular thread.
 *
 * @param[in] thread Thread
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall
void k_mem_paging_thread_stats_get(struct k_thread *thread,
				   struct k_mem_paging_stats_t *stats);
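
/*
 * Usage sketch (illustrative, assuming CONFIG_DEMAND_PAGING_STATS is
 * enabled so the struct fields below exist):
 *
 *	struct k_mem_paging_stats_t stats;
 *
 *	k_mem_paging_stats_get(&stats);
 *	printk("total page faults: %lu\n", stats.pagefaults.cnt);
 *
 *	k_mem_paging_thread_stats_get(k_current_get(), &stats);
 *	printk("this thread's page faults: %lu\n", stats.pagefaults.cnt);
 */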

/**
 * Get the eviction timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-in timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-out timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist);
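
/*
 * Usage sketch (illustrative, assuming CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
 * is enabled): dump the eviction timing histogram:
 *
 *	struct k_mem_paging_histogram_t hist;
 *
 *	k_mem_paging_histogram_eviction_get(&hist);
 *	for (int i = 0;
 *	     i < CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS; i++) {
 *		printk("bin %d (bound %lu): %lu\n",
 *		       i, hist.bounds[i], hist.counts[i]);
 *	}
 */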

#include <syscalls/mem_manage.h>

/** @} */

/**
 * Eviction algorithm APIs
 *
 * @defgroup mem-demand-paging-eviction Eviction Algorithm APIs
 * @ingroup demand_paging
 * @{
 */

/**
 * Select a page frame for eviction
 *
 * The kernel will invoke this to choose a page frame to evict if there
 * are no free page frames.
 *
 * This function will never be called before the initial
 * k_mem_paging_eviction_init().
 *
 * This function is invoked with interrupts locked.
 *
 * @param [out] dirty Whether the page to evict is dirty
 * @return The page frame to evict
 */
struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);

/**
 * Initialization function
 *
 * Called at POST_KERNEL to perform any necessary initialization tasks for the
 * eviction algorithm. k_mem_paging_eviction_select() is guaranteed to never be
 * called until this has returned, and this will only be called once.
 */
void k_mem_paging_eviction_init(void);
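
/*
 * Minimal sketch of a custom eviction algorithm (illustrative only;
 * my_ring_init() and my_ring_next() are hypothetical helpers that would
 * track evictable page frames, e.g. in FIFO order):
 *
 *	void k_mem_paging_eviction_init(void)
 *	{
 *		my_ring_init();
 *	}
 *
 *	struct z_page_frame *k_mem_paging_eviction_select(bool *dirty)
 *	{
 *		// Pessimistically report the frame as dirty; the kernel
 *		// will then always write its contents to the backing store.
 *		*dirty = true;
 *		return my_ring_next();
 *	}
 */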

/** @} */

/**
 * Backing store APIs
 *
 * @defgroup mem-demand-paging-backing-store Backing Store APIs
 * @ingroup demand_paging
 * @{
 */

/**
 * Reserve or fetch a storage location for a data page loaded into a page frame
 *
 * The returned location token must be unique to the mapped virtual address.
 * This location will be used in the backing store to page out data page
 * contents for later retrieval. The location value must be page-aligned.
 *
 * This function may be called multiple times on the same data page. If its
 * page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
 * the previous backing store location for the data page containing a cached
 * clean copy. This clean copy may be updated on page-out, or used to
 * discard clean pages without needing to write out their contents.
 *
 * If the backing store is full, some other backing store location which caches
 * a loaded data page may be selected, in which case its associated page frame
 * will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
 *
 * pf->addr will indicate the virtual address the page is currently mapped to.
 * Large, sparse backing stores which can contain the entire address space
 * may simply generate location tokens purely as a function of pf->addr with no
 * other management necessary.
 *
 * This function distinguishes whether it was called on behalf of a page
 * fault. A free backing store location must always be reserved in order for
 * page faults to succeed. If the page_fault parameter is not set, this
 * function should return -ENOMEM when only one free location remains,
 * keeping it in reserve for page faults.
 *
 * This function is invoked with interrupts locked.
 *
 * @param pf Page frame for which to obtain a storage location
 * @param [out] location Storage location token
 * @param page_fault Whether this request was for a page fault
 * @retval 0 Success
 * @retval -ENOMEM Backing store is full
 */
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault);
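
/*
 * Sketch for a large, sparse backing store (illustrative): as noted above,
 * such a store can derive the location token directly from pf->addr and
 * never runs out of space:
 *
 *	int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
 *						    uintptr_t *location,
 *						    bool page_fault)
 *	{
 *		ARG_UNUSED(page_fault);
 *
 *		*location = (uintptr_t)pf->addr;
 *		return 0;
 *	}
 */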

/**
 * Free a backing store location
 *
 * Any stored data may be discarded, and the location token associated with
 * this address may be re-used for some other data page.
 *
 * This function is invoked with interrupts locked.
 *
 * @param location Location token to free
 */
void k_mem_paging_backing_store_location_free(uintptr_t location);

/**
 * Copy a data page from Z_SCRATCH_PAGE to the specified location
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended source page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_in() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page, for later retrieval
 */
void k_mem_paging_backing_store_page_out(uintptr_t location);

/**
 * Copy a data page from the provided location to Z_SCRATCH_PAGE.
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended destination page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_out() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page
 */
void k_mem_paging_backing_store_page_in(uintptr_t location);
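
/*
 * Sketch of a simple RAM-based backing store (illustrative, assuming the
 * location token is itself a pointer into a reserved RAM region):
 *
 *	void k_mem_paging_backing_store_page_out(uintptr_t location)
 *	{
 *		(void)memcpy((void *)location, (void *)Z_SCRATCH_PAGE,
 *			     CONFIG_MMU_PAGE_SIZE);
 *	}
 *
 *	void k_mem_paging_backing_store_page_in(uintptr_t location)
 *	{
 *		(void)memcpy((void *)Z_SCRATCH_PAGE, (void *)location,
 *			     CONFIG_MMU_PAGE_SIZE);
 *	}
 */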

/**
 * Update internal accounting after a page-in
 *
 * This is invoked after k_mem_paging_backing_store_page_in() and interrupts
 * have been re-locked, making it safe to access the z_page_frame data.
 * The location value will be the same passed to
 * k_mem_paging_backing_store_page_in().
 *
 * The primary use-case for this is to update custom fields for the backing
 * store in the page frame, to reflect where the data should be evicted to
 * if it is paged out again. This may be a no-op in some implementations.
 *
 * If the backing store caches paged-in data pages, this is the appropriate
 * time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
 * out clean data pages if they are noted as clean in the page tables and the
 * Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
 *
 * @param pf Page frame that was loaded in
 * @param location Location of where the loaded data page was retrieved
 */
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
					      uintptr_t location);

/**
 * Backing store initialization function.
 *
 * The implementation may expect to receive page in/out calls as soon as this
 * returns, but not before that. Called at POST_KERNEL.
 *
 * This function is expected to do two things:
 * - Initialize any internal data structures and accounting for the backing
 *   store.
 * - If the backing store already contains all or some loaded kernel data pages
 *   at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
 *   associated page frames, and any internal accounting set up appropriately.
 */
void k_mem_paging_backing_store_init(void);

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */