1# Copyright (c) 2021 Intel Corporation
2#
3# SPDX-License-Identifier: Apache-2.0
4
5menu "Virtual Memory Support"
6
7config KERNEL_VM_SUPPORT
8	bool
9	help
10	  Hidden option to enable virtual memory Kconfigs.
11
12if KERNEL_VM_SUPPORT
13
14DT_CHOSEN_Z_SRAM := zephyr,sram
15
16config KERNEL_VM_BASE
17	hex "Virtual address space base address"
18	default $(dt_chosen_reg_addr_hex,$(DT_CHOSEN_Z_SRAM))
19	help
20	  Define the base of the kernel's address space.
21
22	  By default, this is the same as the DT_CHOSEN_Z_SRAM physical base SRAM
23	  address from DTS, in which case RAM will be identity-mapped. Some
24	  architectures may require RAM to be mapped in this way; they may have
25	  just one RAM region and doing this makes linking much simpler, as
26	  at least when the kernel boots all virtual RAM addresses are the same
27	  as their physical address (demand paging at runtime may later modify
28	  this for non-pinned page frames).
29
30	  Otherwise, if RAM isn't identity-mapped:
31	  1. It is the architecture's responsibility to transition the
32	  instruction pointer to virtual addresses at early boot before
33	  entering the kernel at z_cstart().
34	  2. The underlying architecture may impose constraints on the bounds of
35	  the kernel's address space, such as not overlapping physical RAM
36	  regions if RAM is not identity-mapped, or the virtual and physical
37	  base addresses being aligned to some common value (which allows
38	  double-linking of paging structures to make the instruction pointer
39	  transition simpler).
40
41	  Zephyr does not implement a split address space and if multiple
42	  page tables are in use, they all have the same virtual-to-physical
43	  mappings (with potentially different permissions).
44
45config KERNEL_VM_OFFSET
46	hex "Kernel offset within address space"
47	default 0
48	help
49	  Offset that the kernel image begins within its address space,
50	  if this is not the same offset from the beginning of RAM.
51
52	  Some care may need to be taken in selecting this value. In certain
53	  build-time cases, or when a physical address cannot be looked up
54	  in page tables, the equation:
55
56	      virt = phys + ((KERNEL_VM_BASE + KERNEL_VM_OFFSET) -
57	                     (SRAM_BASE_ADDRESS + SRAM_OFFSET))
58
59	  will be used to convert between physical and virtual addresses for
60	  memory that is mapped at boot.
61
62	  This is uncommon and is only necessary if the beginning of VM and
63	  physical memory have dissimilar alignment.
64
65config KERNEL_VM_SIZE
66	hex "Size of kernel address space in bytes"
67	default 0x800000
68	help
69	  Size of the kernel's address space. Constraining this helps control
70	  how much total memory can be used for page tables.
71
72	  The difference between the kernel's mapped RAM size and KERNEL_VM_SIZE
73	  is the size of the virtual region for runtime memory mappings, needed
74	  for mapping driver MMIO regions, as well as special RAM mapping
75	  use-cases such as VDSO pages, memory mapped thread stacks, and anonymous
76	  memory mappings. The kernel itself will be mapped in here as well at boot.
77
78	  Systems with very large amounts of memory (such as 512M or more)
79	  will want to use a 64-bit build of Zephyr; there are no plans to
80	  implement a notion of "high" memory in Zephyr to work around physical
81	  RAM sizes larger than the defined bounds of the virtual address space.
82
83config KERNEL_DIRECT_MAP
84	bool "Memory region direct-map support"
85	depends on MMU
86	help
87	  This enables direct-map support, i.e. the ability to map a region
88	  1:1 between virtual and physical addresses.
89
90	  If the specified memory region is within the virtual memory space and
91	  does not overlap any existing mappings, it will be reserved from the
92	  virtual memory space and mapped; otherwise the operation will fail.
93	  Any attempt to map a region that crosses the boundary of the virtual
94	  memory space will also fail.
95
96	  Note that this is for compatibility and portable apps shouldn't
97	  be using it.
98
99endif # KERNEL_VM_SUPPORT
100
101menuconfig MMU
102	bool "MMU features"
103	depends on CPU_HAS_MMU
104	select KERNEL_VM_SUPPORT
105	help
106	  This option is enabled when the CPU's memory management unit is active
107	  and the arch_mem_map() API is available.
108
109if MMU
110config MMU_PAGE_SIZE
111	hex "Size of smallest granularity MMU page"
112	default 0x1000
113	help
114	  Size of memory pages. Varies per MMU but 4K is common. For MMUs that
115	  support multiple page sizes, put the smallest one here.
116
117menuconfig DEMAND_PAGING
118	bool "Demand paging [EXPERIMENTAL]"
119	depends on ARCH_HAS_DEMAND_PAGING
120	help
121	  Enable demand paging. Requires architecture support in how the kernel
122	  is linked and the implementation of an eviction algorithm and a
123	  backing store for evicted pages.
124
125if DEMAND_PAGING
126config DEMAND_PAGING_ALLOW_IRQ
127	bool "Allow interrupts during page-ins/outs"
128	help
129	  Allow interrupts to be serviced while pages are being evicted or
130	  retrieved from the backing store. This is much better for system
131	  latency, but any code running in interrupt context that page faults
132	  will cause a kernel panic. Such code must work with exclusively pinned
133	  code and data pages.
134
135	  The scheduler is still disabled during this operation.
136
137	  If this option is disabled, the page fault servicing logic
138	  runs with interrupts disabled for the entire operation. However,
139	  ISRs may also page fault.
140
141config DEMAND_PAGING_PAGE_FRAMES_RESERVE
142	int "Number of page frames reserved for paging"
143	default 32 if !LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
144	default 0
145	help
146	  This sets the number of page frames that will be reserved for
147	  paging that do not count towards free memory. This is to
148	  ensure that there are some page frames available for paging
149	  code and data. Otherwise, it would be possible to exhaust
150	  all page frames via anonymous memory mappings.
151
152config DEMAND_PAGING_STATS
153	bool "Gather Demand Paging Statistics"
154	help
155	  This enables gathering various statistics related to demand paging,
156	  e.g. number of pagefaults. This is useful for tuning eviction
157	  algorithms and optimizing backing store.
158
159	  Say N in a production system, as this is not without cost.
160
161config DEMAND_PAGING_STATS_USING_TIMING_FUNCTIONS
162	bool "Use Timing Functions to Gather Demand Paging Statistics"
163	select TIMING_FUNCTIONS_NEED_AT_BOOT
164	help
165	  Use timing functions to gather various demand paging statistics.
166
167config DEMAND_PAGING_THREAD_STATS
168	bool "Gather per Thread Demand Paging Statistics"
169	depends on DEMAND_PAGING_STATS
170	help
171	  This enables gathering per thread statistics related to demand
172	  paging.
173
174	  Say N in a production system, as this is not without cost.
175
176config DEMAND_PAGING_TIMING_HISTOGRAM
177	bool "Gather Demand Paging Execution Timing Histogram"
178	depends on DEMAND_PAGING_STATS
179	help
180	  This gathers the histogram of execution time on page eviction
181	  selection, and backing store page in and page out.
182
183	  Say N in a production system, as this is not without cost.
184
185config DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS
186	int "Number of bins (buckets) in Demand Paging Timing Histogram"
187	depends on DEMAND_PAGING_TIMING_HISTOGRAM
188	default 10
189	help
190	  Defines the number of bins (buckets) in the histogram used for
191	  gathering execution timing information for demand paging.
192
193	  This requires k_mem_paging_eviction_histogram_bounds[] and
194	  k_mem_paging_backing_store_histogram_bounds[] to define
195	  the upper bounds for each bin. See kernel/statistics.c for
196	  information.
197
198endif # DEMAND_PAGING
199endif # MMU
200
201endmenu # Virtual Memory Support
202