/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H

#include <zephyr/sys/util.h>
#include <zephyr/toolchain.h>
#if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/arm_mem.h>
#endif

/**
 * @brief Memory Management
 * @defgroup memory_management Memory Management
 * @ingroup os_services
 * @{
 * @}
 */

/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching. Most drivers want this. */
#define K_MEM_CACHE_NONE 2

/** Write-through caching. Used by certain drivers. */
#define K_MEM_CACHE_WT 1

/** Full write-back caching. Any mapped RAM wants this. */
#define K_MEM_CACHE_WB 0

/*
 * ARM64-specific flags are defined in arch/arm64/arm_mem.h;
 * take care to avoid conflicts when updating these flags.
 */

/** Reserved bits for cache modes in k_mem_map() flags argument */
#define K_MEM_CACHE_MASK (BIT(3) - 1)

/*
 * Region permission attributes. Default is read-only, no user, no exec
 */

/** Region will have read/write access (and not read-only) */
#define K_MEM_PERM_RW BIT(3)

/** Region will be executable (normally forbidden) */
#define K_MEM_PERM_EXEC BIT(4)

/** Region will be accessible to user mode (normally supervisor-only) */
#define K_MEM_PERM_USER BIT(5)

/*
 * Region mapping behaviour attributes
 */

/** Region will be mapped 1:1, with identical virtual and physical addresses */
#define K_MEM_DIRECT_MAP BIT(6)
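
/*
 * Illustrative note (not itself part of the API): a flags argument for the
 * mapping functions below is formed by OR-ing exactly one K_MEM_CACHE_*
 * mode with any K_MEM_PERM_* / behaviour bits, e.g.:
 *
 *	uint32_t flags = K_MEM_CACHE_NONE | K_MEM_PERM_RW;
 */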

/*
 * This is the offset to subtract from a virtual address mapped in the
 * kernel's permanent mapping of RAM, to obtain its physical address.
 *
 *     virt_addr = phys_addr + Z_MEM_VM_OFFSET
 *
 * This only works for virtual addresses within the interval
 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
 *
 * These macros are intended for assembly, linker code, and static initializers.
 * Use with care.
 *
 * Note that when demand paging is active, these will only work with page
 * frames that are pinned to their virtual mapping at boot.
 *
 * TODO: This will likely need to move to an arch API or need additional
 * constraints defined.
 */
#ifdef CONFIG_MMU
#define Z_MEM_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
#define Z_MEM_VM_OFFSET	0
#endif

#define Z_MEM_PHYS_ADDR(virt)	((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys)	((phys) + Z_MEM_VM_OFFSET)
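
/*
 * Worked example, with purely illustrative values: if CONFIG_KERNEL_VM_BASE
 * is 0x80000000 and CONFIG_SRAM_BASE_ADDRESS is 0x10000000 (both offsets
 * zero), then Z_MEM_VM_OFFSET is 0x70000000, Z_MEM_VIRT_ADDR(0x10001000)
 * yields 0x80001000, and Z_MEM_PHYS_ADDR(0x80001000) yields 0x10001000
 * again.
 */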

#if Z_MEM_VM_OFFSET != 0
#define Z_VM_KERNEL 1
#ifdef CONFIG_XIP
#error "XIP and a virtual memory kernel are not allowed"
#endif
#endif

#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <zephyr/sys/__assert.h>

struct k_mem_paging_stats_t {
#ifdef CONFIG_DEMAND_PAGING_STATS
	struct {
		/** Number of page faults */
		unsigned long cnt;

		/** Number of page faults with IRQ locked */
		unsigned long irq_locked;

		/** Number of page faults with IRQ unlocked */
		unsigned long irq_unlocked;

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
		/** Number of page faults while in ISR */
		unsigned long in_isr;
#endif
	} pagefaults;

	struct {
		/** Number of clean pages selected for eviction */
		unsigned long clean;

		/** Number of dirty pages selected for eviction */
		unsigned long dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};

struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Counts for each bin in timing histogram */
	unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/* Upper bounds for each bin in the timing histogram */
	unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};

/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
static inline uintptr_t z_mem_phys_addr(void *virt)
{
	uintptr_t addr = (uintptr_t)virt;

#ifdef CONFIG_MMU
	__ASSERT(
#if CONFIG_KERNEL_VM_BASE != 0
		 (addr >= CONFIG_KERNEL_VM_BASE) &&
#endif
		 (addr < (CONFIG_KERNEL_VM_BASE +
			  (CONFIG_KERNEL_VM_SIZE))),
		 "address %p not in permanent mappings", virt);
#else
	/* Should be identity-mapped */
	__ASSERT(
#if CONFIG_SRAM_BASE_ADDRESS != 0
		 (addr >= CONFIG_SRAM_BASE_ADDRESS) &&
#endif
		 (addr < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM",
		 (unsigned long)addr);
#endif /* CONFIG_MMU */

	/* TODO add assertion that this page is pinned to boot mapping,
	 * the above checks won't be sufficient with demand paging
	 */

	return Z_MEM_PHYS_ADDR(addr);
}

/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
static inline void *z_mem_virt_addr(uintptr_t phys)
{
	__ASSERT(
#if CONFIG_SRAM_BASE_ADDRESS != 0
		 (phys >= CONFIG_SRAM_BASE_ADDRESS) &&
#endif
		 (phys < (CONFIG_SRAM_BASE_ADDRESS +
			  (CONFIG_SRAM_SIZE * 1024UL))),
		 "physical address 0x%lx not in RAM", (unsigned long)phys);

	/* TODO add assertion that this page frame is pinned to boot mapping,
	 * the above check won't be sufficient with demand paging
	 */

	return (void *)Z_MEM_VIRT_ADDR(phys);
}

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Map a physical memory region into the kernel's virtual address space
 *
 * This function is intended for mapping memory-mapped I/O regions into
 * the virtual address space. Given a physical address and a size, return a
 * linear address representing the base of where the physical region is mapped
 * in the virtual address space for the Zephyr kernel.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel. This function will choose the virtual address
 * and return it to the caller.
 *
 * Portable code should never assume that phys_addr and linear_addr will
 * be equal.
 *
 * Caching and access properties are controlled by the 'flags' parameter.
 * Unused bits in 'flags' are reserved for future expansion.
 * A caching mode must be selected. By default, the region is read-only,
 * with user access and code execution forbidden. This policy is changed
 * by passing K_MEM_CACHE_* and K_MEM_PERM_* macros in the 'flags' parameter.
 *
 * If there is insufficient virtual address space for the mapping this will
 * generate a kernel panic.
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to map system RAM page
 * frames. It may conflict with anonymous memory mappings and demand paging
 * and produce undefined behavior. Do not use this for RAM unless you know
 * exactly what you are doing. If you need a chunk of memory, use k_mem_map().
 * If you need a contiguous buffer of physical memory, statically declare it
 * and pin it at build time; it will be mapped when the system boots.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt_ptr [out] Output virtual address storage location
 * @param phys Physical address base of the memory region
 * @param size Size of the memory region
 * @param flags Caching mode and access flags, see K_MEM_CACHE_* and
 *              K_MEM_PERM_* macros
 */
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
		uint32_t flags);
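
/*
 * Minimal usage sketch, assuming a hypothetical device with a 4 KiB
 * register block at physical address 0x40000000 (address and size are
 * made up for illustration):
 *
 *	uint8_t *regs;
 *
 *	z_phys_map(&regs, 0x40000000UL, 0x1000,
 *		   K_MEM_CACHE_NONE | K_MEM_PERM_RW);
 *
 * After the call, regs points to the device registers in virtual memory.
 */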

/**
 * Unmap a virtual memory region from the kernel's virtual address space.
 *
 * This function is intended to be used by drivers and early boot routines
 * where temporary memory mappings need to be made. This allows these
 * memory mappings to be discarded once they are no longer needed.
 *
 * This function alters the active page tables in the area reserved
 * for the kernel.
 *
 * This will align the input parameters to page boundaries so that
 * this can be used with the virtual address as returned by
 * z_phys_map().
 *
 * This API is only available if CONFIG_MMU is enabled.
 *
 * It is highly discouraged to use this function to unmap memory mappings.
 * It may conflict with anonymous memory mappings and demand paging and
 * produce undefined behavior. Do not use this unless you know exactly
 * what you are doing.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Starting address of the virtual address region to be unmapped.
 * @param size Size of the virtual address region
 */
void z_phys_unmap(uint8_t *virt, size_t size);
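
/*
 * Continuing the sketch above: once the temporary mapping is no longer
 * needed, it can be released using the same pointer and size:
 *
 *	z_phys_unmap(regs, 0x1000);
 */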

/*
 * k_mem_map() control flags
 */

/**
 * @brief The mapped region is not guaranteed to be zeroed.
 *
 * This may improve performance. The associated page frames may contain
 * indeterminate data, zeroes, or even sensitive information.
 *
 * This may not be used with K_MEM_PERM_USER as there are no circumstances
 * where this is safe.
 */
#define K_MEM_MAP_UNINIT BIT(16)

/**
 * Region will be pinned in memory and never paged out
 *
 * Such memory is guaranteed to never produce a page fault due to page-outs
 * or copy-on-write once the mapping call has returned. Physical page frames
 * will be pre-fetched as necessary and pinned.
 */
#define K_MEM_MAP_LOCK BIT(17)

/**
 * Return the amount of free memory available
 *
 * The returned value will reflect how many free RAM page frames are available.
 * If demand paging is enabled, it may still be possible to allocate more.
 *
 * The information reported by this function may go stale immediately if
 * concurrent memory mappings or page-ins take place.
 *
 * @return Free physical RAM, in bytes
 */
size_t k_mem_free_get(void);
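
/*
 * Sketch, illustrative only: checking free page frames before attempting a
 * large mapping.
 *
 *	if (k_mem_free_get() < size) {
 *		... // may still succeed if demand paging is enabled
 *	}
 */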

/**
 * Map anonymous memory into Zephyr's address space
 *
 * This function effectively increases the data space available to Zephyr.
 * The kernel will choose a base virtual address and return it to the caller.
 * The memory will have access permissions for all contexts set per the
 * provided flags argument.
 *
 * If user thread access control needs to be managed in any way, do not enable
 * K_MEM_PERM_USER flags here; instead manage the region's permissions
 * with memory domain APIs after the mapping has been established. Setting
 * K_MEM_PERM_USER here will allow all user threads to access this memory
 * which is usually undesirable.
 *
 * Unless K_MEM_MAP_UNINIT is used, the returned memory will be zeroed.
 *
 * The mapped region is not guaranteed to be physically contiguous in memory.
 * Physically contiguous buffers should be allocated statically and pinned
 * at build time.
 *
 * Pages mapped in this way have write-back cache settings.
 *
 * The returned virtual memory pointer will be page-aligned. The size
 * parameter, and any base address used for re-mapping purposes, must be
 * page-aligned.
 *
 * Note that the allocation includes two guard pages immediately before
 * and after the requested region. The total size of the allocation will be
 * the requested size plus the size of these two guard pages.
 *
 * Many K_MEM_MAP_* flags have been implemented to alter the behavior of this
 * function, with details in the documentation for these flags.
 *
 * @param size Size of the memory mapping. This must be page-aligned.
 * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
 * @return The mapped memory location, or NULL if insufficient virtual address
 *         space, insufficient physical memory to establish the mapping,
 *         or insufficient memory for paging structures.
 */
void *k_mem_map(size_t size, uint32_t flags);

/**
 * Un-map mapped memory
 *
 * This removes a memory mapping for the provided page-aligned region.
 * Associated page frames will be freed and the kernel may re-use the
 * associated virtual address region. Any paged-out data pages may be
 * discarded.
 *
 * Calling this function on a region which was not mapped to begin with is
 * undefined behavior.
 *
 * @param addr Page-aligned memory region base virtual address
 * @param size Page-aligned memory region size
 */
void k_mem_unmap(void *addr, size_t size);
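
/*
 * Sketch of an anonymous mapping and its teardown, assuming
 * CONFIG_MMU_PAGE_SIZE as the page size (illustrative only):
 *
 *	size_t size = 4 * CONFIG_MMU_PAGE_SIZE;
 *	void *buf = k_mem_map(size, K_MEM_PERM_RW);
 *
 *	if (buf == NULL) {
 *		... // insufficient virtual or physical memory
 *	}
 *	...
 *	k_mem_unmap(buf, size);
 */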

/**
 * Given an arbitrary region, provide an aligned region that covers it
 *
 * The returned region will have both its base address and size aligned
 * to the provided alignment value.
 *
 * @param aligned_addr [out] Aligned address
 * @param aligned_size [out] Aligned region size
 * @param addr Region base address
 * @param size Region size
 * @param align What to align the address and size to
 * @return The offset between aligned_addr and addr
 */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align);
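
/*
 * Worked example with illustrative values: aligning the region
 * [0x1234, 0x1234 + 0x100) to a 0x1000 alignment yields
 * aligned_addr = 0x1000, aligned_size = 0x1000, and a returned
 * offset of 0x234:
 *
 *	uintptr_t aligned_addr;
 *	size_t aligned_size;
 *	size_t offset = k_mem_region_align(&aligned_addr, &aligned_size,
 *					   0x1234, 0x100, 0x1000);
 */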

/**
 * @defgroup demand_paging Demand Paging
 * @ingroup memory_management
 */
/**
 * @defgroup mem-demand-paging Demand Paging APIs
 * @ingroup demand_paging
 * @{
 */

/**
 * Evict a page-aligned virtual memory region to the backing store
 *
 * Useful if it is known that a memory region will not be used for some time.
 * All the data pages within the specified region will be evicted to the
 * backing store if they weren't already, with their associated page frames
 * marked as available for mappings or page-ins.
 *
 * None of the associated page frames mapped to the provided region should
 * be pinned.
 *
 * Note that there are no guarantees how long these pages will stay evicted;
 * they could take page faults and be paged back in immediately.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 * @retval 0 Success
 * @retval -ENOMEM Insufficient space in backing store to satisfy request.
 *         The region may be partially paged out.
 */
int k_mem_page_out(void *addr, size_t size);

/**
 * Load a virtual data region into memory
 *
 * After the function completes, all the page frames associated with this
 * region will be paged in. However, they are not guaranteed to stay there.
 * This is useful if the region is known to be used soon.
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_page_in(void *addr, size_t size);
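
/*
 * Sketch, illustrative only: proactively evicting a page-aligned buffer
 * that will be idle for a while, then bringing it back before use:
 *
 *	if (k_mem_page_out(addr, size) != 0) {
 *		... // backing store full; region may be partially paged out
 *	}
 *	...
 *	k_mem_page_in(addr, size);
 */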

/**
 * Pin an aligned virtual data region, paging in as necessary
 *
 * After the function completes, all the page frames associated with this
 * region will be resident in memory and pinned such that they stay that way.
 * This is a stronger version of k_mem_page_in().
 *
 * If CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled, this function may not be
 * called by ISRs as the backing store may be in-use.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_pin(void *addr, size_t size);

/**
 * Un-pin an aligned virtual data region
 *
 * After the function completes, all the page frames associated with this
 * region will no longer be marked as pinned. This does not evict the region;
 * follow this with k_mem_page_out() if you need that.
 *
 * @param addr Base page-aligned virtual address
 * @param size Page-aligned data region size
 */
void k_mem_unpin(void *addr, size_t size);
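
/*
 * Sketch, illustrative only: pinning a page-aligned buffer around a
 * section that must not take page faults:
 *
 *	k_mem_pin(buf, size);
 *	... // buf is guaranteed resident here
 *	k_mem_unpin(buf, size);
 */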

/**
 * Get the paging statistics since system startup
 *
 * This populates the paging statistics struct being passed in
 * as argument.
 *
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
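
/*
 * Sketch, illustrative only and meaningful only with
 * CONFIG_DEMAND_PAGING_STATS enabled: dumping the global page fault count.
 *
 *	struct k_mem_paging_stats_t stats;
 *
 *	k_mem_paging_stats_get(&stats);
 *	printk("page faults: %lu\n", stats.pagefaults.cnt);
 */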

struct k_thread;
/**
 * Get the paging statistics since system startup for a thread
 *
 * This populates the paging statistics struct being passed in
 * as argument for a particular thread.
 *
 * @param[in] thread Thread
 * @param[in,out] stats Paging statistics struct to be filled.
 */
__syscall
void k_mem_paging_thread_stats_get(struct k_thread *thread,
				   struct k_mem_paging_stats_t *stats);

/**
 * Get the eviction timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_eviction_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-in timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_in_get(
	struct k_mem_paging_histogram_t *hist);

/**
 * Get the backing store page-out timing histogram
 *
 * This populates the timing histogram struct being passed in
 * as argument.
 *
 * @param[in,out] hist Timing histogram struct to be filled.
 */
__syscall void k_mem_paging_histogram_backing_store_page_out_get(
	struct k_mem_paging_histogram_t *hist);

#include <syscalls/mem_manage.h>

/** @} */

/**
 * Eviction algorithm APIs
 *
 * @defgroup mem-demand-paging-eviction Eviction Algorithm APIs
 * @ingroup demand_paging
 * @{
 */

/**
 * Select a page frame for eviction
 *
 * The kernel will invoke this to choose a page frame to evict if there
 * are no free page frames.
 *
 * This function will never be called before the initial
 * k_mem_paging_eviction_init().
 *
 * This function is invoked with interrupts locked.
 *
 * @param [out] dirty Whether the page to evict is dirty
 * @return The page frame to evict
 */
struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);

/**
 * Initialization function
 *
 * Called at POST_KERNEL to perform any necessary initialization tasks for the
 * eviction algorithm. k_mem_paging_eviction_select() is guaranteed to never be
 * called until this has returned, and this will only be called once.
 */
void k_mem_paging_eviction_init(void);
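
/*
 * Minimal sketch of an eviction algorithm implementing these two hooks,
 * assuming hypothetical helpers my_frame_count(), my_frame() and
 * my_frame_is_dirty() that track evictable (non-pinned) page frames
 * (none of these helpers are part of this API):
 *
 *	static unsigned int next_frame;
 *
 *	struct z_page_frame *k_mem_paging_eviction_select(bool *dirty)
 *	{
 *		// Naive round-robin over evictable page frames
 *		struct z_page_frame *pf =
 *			my_frame(next_frame++ % my_frame_count());
 *
 *		*dirty = my_frame_is_dirty(pf);
 *		return pf;
 *	}
 *
 *	void k_mem_paging_eviction_init(void)
 *	{
 *		next_frame = 0;
 *	}
 */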

/** @} */

/**
 * Backing store APIs
 *
 * @defgroup mem-demand-paging-backing-store Backing Store APIs
 * @ingroup demand_paging
 * @{
 */

/**
 * Reserve or fetch a storage location for a data page loaded into a page frame
 *
 * The returned location token must be unique to the mapped virtual address.
 * This location will be used in the backing store to page out data page
 * contents for later retrieval. The location value must be page-aligned.
 *
 * This function may be called multiple times on the same data page. If its
 * page frame has its Z_PAGE_FRAME_BACKED bit set, it is expected to return
 * the previous backing store location for the data page containing a cached
 * clean copy. This clean copy may be updated on page-out, or used to
 * discard clean pages without needing to write out their contents.
 *
 * If the backing store is full, some other backing store location which caches
 * a loaded data page may be selected, in which case its associated page frame
 * will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
 *
 * pf->addr will indicate the virtual address the page is currently mapped to.
 * Large, sparse backing stores which can contain the entire address space
 * may simply generate location tokens purely as a function of pf->addr with no
 * other management necessary.
 *
 * This function distinguishes whether it was called on behalf of a page
 * fault. A free backing store location must always be reserved in order for
 * page faults to succeed. If the page_fault parameter is not set, this
 * function should return -ENOMEM even if exactly one location is still
 * available, so that it stays reserved for page faults.
 *
 * This function is invoked with interrupts locked.
 *
 * @param pf Page frame to obtain a storage location for
 * @param [out] location Storage location token
 * @param page_fault Whether this request was for a page fault
 * @retval 0 Success
 * @retval -ENOMEM Backing store is full
 */
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
					    uintptr_t *location,
					    bool page_fault);
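
/*
 * Sketch of a trivial slot allocator over a fixed storage area, ignoring
 * the Z_PAGE_FRAME_BACKED caching described above and assuming hypothetical
 * helpers my_find_free_slot(), my_free_slot_count() and my_mark_slot_used()
 * (none of which are part of this API). Note how one free location is held
 * back for page faults, per the rule above:
 *
 *	int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
 *						    uintptr_t *location,
 *						    bool page_fault)
 *	{
 *		int slot = my_find_free_slot();
 *
 *		if (slot < 0 || (!page_fault && my_free_slot_count() == 1)) {
 *			return -ENOMEM;
 *		}
 *		my_mark_slot_used(slot);
 *		*location = (uintptr_t)slot * CONFIG_MMU_PAGE_SIZE;
 *		return 0;
 *	}
 */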

/**
 * Free a backing store location
 *
 * Any stored data may be discarded, and the location token associated with
 * this address may be re-used for some other data page.
 *
 * This function is invoked with interrupts locked.
 *
 * @param location Location token to free
 */
void k_mem_paging_backing_store_location_free(uintptr_t location);

/**
 * Copy a data page from Z_SCRATCH_PAGE to the specified location
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended source page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_in() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page, for later retrieval
 */
void k_mem_paging_backing_store_page_out(uintptr_t location);

/**
 * Copy a data page from the provided location to Z_SCRATCH_PAGE.
 *
 * Immediately before this is called, Z_SCRATCH_PAGE will be mapped read-write
 * to the intended destination page frame for the calling context.
 *
 * Calls to this and k_mem_paging_backing_store_page_out() will always be
 * serialized, but interrupts may be enabled.
 *
 * @param location Location token for the data page
 */
void k_mem_paging_backing_store_page_in(uintptr_t location);
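
/*
 * Sketch of the copy pair for a simple RAM-backed store, assuming a
 * hypothetical my_location_to_addr() that converts a location token into
 * an address within the storage area (Z_SCRATCH_PAGE and
 * CONFIG_MMU_PAGE_SIZE are provided by the kernel):
 *
 *	void k_mem_paging_backing_store_page_out(uintptr_t location)
 *	{
 *		memcpy(my_location_to_addr(location), Z_SCRATCH_PAGE,
 *		       CONFIG_MMU_PAGE_SIZE);
 *	}
 *
 *	void k_mem_paging_backing_store_page_in(uintptr_t location)
 *	{
 *		memcpy(Z_SCRATCH_PAGE, my_location_to_addr(location),
 *		       CONFIG_MMU_PAGE_SIZE);
 *	}
 */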

/**
 * Update internal accounting after a page-in
 *
 * This is invoked after k_mem_paging_backing_store_page_in() and interrupts
 * have been re-locked, making it safe to access the z_page_frame data.
 * The location value will be the same passed to
 * k_mem_paging_backing_store_page_in().
 *
 * The primary use-case for this is to update custom fields for the backing
 * store in the page frame, to reflect where the data should be evicted to
 * if it is paged out again. This may be a no-op in some implementations.
 *
 * If the backing store caches paged-in data pages, this is the appropriate
 * time to set the Z_PAGE_FRAME_BACKED bit. The kernel only skips paging
 * out clean data pages if they are noted as clean in the page tables and the
 * Z_PAGE_FRAME_BACKED bit is set in their associated page frame.
 *
 * @param pf Page frame that was loaded in
 * @param location Location of where the loaded data page was retrieved
 */
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
					      uintptr_t location);

/**
 * Backing store initialization function.
 *
 * The implementation may expect to receive page in/out calls as soon as this
 * returns, but not before that. Called at POST_KERNEL.
 *
 * This function is expected to do two things:
 * - Initialize any internal data structures and accounting for the backing
 *   store.
 * - If the backing store already contains all or some loaded kernel data pages
 *   at boot time, Z_PAGE_FRAME_BACKED should be appropriately set for their
 *   associated page frames, and any internal accounting set up appropriately.
 */
void k_mem_paging_backing_store_init(void);

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */