1/*
2 * Copyright (c) 2019-2021 Intel Corp.
3 * SPDX-License-Identifier: Apache-2.0
4 */
5
6#include <linker/linker-defs.h>
7#include <linker/linker-tool.h>
8
/* Load ("ROM") and execution (RAM) regions are one and the same here:
 * both resolve to the RAM memory region.
 */
#define ROMABLE_REGION RAM
#define RAMABLE_REGION RAM

/* Advance the location counter to the next MMU page boundary */
#define MMU_PAGE_ALIGN		. = ALIGN(CONFIG_MMU_PAGE_SIZE);

/* Used to align areas with separate memory permission characteristics
 * so that the page permissions can be set in the MMU. Without this,
 * the kernel is just one blob with the same RWX permissions on all RAM
 */
#ifdef CONFIG_SRAM_REGION_PERMISSIONS
	#define MMU_PAGE_ALIGN_PERM	MMU_PAGE_ALIGN
#else
	/* Expands to nothing: no per-region permissions, so no padding needed */
	#define MMU_PAGE_ALIGN_PERM
#endif

/* Kernel entry point symbol, selected by Kconfig */
ENTRY(CONFIG_KERNEL_ENTRY)
25
SECTIONS
{
	/*
	 * The "locore" must be in the 64K of RAM, so that 16-bit code (with
	 * segment registers == 0x0000) and 32/64-bit code agree on addresses.
	 * ... there is no 16-bit code yet, but there will be when we add SMP.
	 */

	SECTION_PROLOGUE(.locore,,)
	{
	_locore_start = .;
	*(.locore)
	*(.locore.*)
	MMU_PAGE_ALIGN_PERM
	_locore_end = .;

	/* Read-only low-memory data, page-aligned from the code above it so
	 * it can carry different MMU permissions.
	 */
	_lorodata_start = .;
	*(.lorodata)
	MMU_PAGE_ALIGN_PERM
	_lodata_start = .;

	*(.lodata)

#ifdef CONFIG_X86_KPTI
	/* Special page containing supervisor data that is still mapped in
	 * user mode page tables. GDT, TSSes, trampoline stack, and
	 * any LDT must go here as they always must live in a page that is
	 * marked 'present'. Still not directly user accessible, but
	 * no sensitive data should be here as Meltdown exploits may read it.
	 *
	 * On x86-64 the IDT is in rodata and doesn't need to be in the
	 * trampoline page.
	 */
	MMU_PAGE_ALIGN_PERM
	z_shared_kernel_page_start = .;
#endif /* CONFIG_X86_KPTI */

	*(.tss)
	*(.gdt)

#ifdef CONFIG_X86_KPTI
	*(.trampolines)
	MMU_PAGE_ALIGN_PERM
	z_shared_kernel_page_end = .;

	/* The shared area must be exactly one 4K page: both of its
	 * boundaries are page-aligned above, so anything larger means the
	 * .tss/.gdt/.trampolines content overflowed a single page.
	 */
	ASSERT(z_shared_kernel_page_end - z_shared_kernel_page_start == 4096,
	       "shared kernel area is not one memory page");
#endif /* CONFIG_X86_KPTI */

	MMU_PAGE_ALIGN
	_lodata_end = .;
	} > LOCORE

	/* Sizes of the three locore sub-areas (code / rodata / data) */
	_locore_size = _lorodata_start - _locore_start;
	_lorodata_size = _lodata_start - _lorodata_start;
	_lodata_size = _lodata_end - _lodata_start;

	/*
	 * The rest of the system is loaded in "normal" memory (typically
	 * placed above 1MB to avoid the memory hole at 0x90000-0xFFFFF).
	 */

	SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
	{
	. = ALIGN(16);
	__rom_region_start = .;
	__text_region_start = .;
	z_mapped_start = .;
	*(.text)
	*(.text.*)

	#include <linker/kobject-text.ld>

	MMU_PAGE_ALIGN_PERM
	} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	__text_region_end = .;
	__text_region_size = __text_region_end - __text_region_start;
	__rodata_region_start = .;

	#include <linker/common-rom.ld>
	#include <linker/thread-local-storage.ld>

	SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
	{
	. = ALIGN(16);
	*(.rodata)
	*(.rodata.*)

	MMU_PAGE_ALIGN
	#include <snippets-rodata.ld>

#ifdef CONFIG_X86_MMU
	/* 8-byte aligned array of statically-declared MMU region entries;
	 * KEEP() prevents the linker from garbage-collecting them since
	 * they are only referenced via the start/end symbols.
	 */
	. = ALIGN(8);
	_mmu_region_list_start = .;
	KEEP(*("._mmu_region.static.*"))
	_mmu_region_list_end = .;
#endif /* CONFIG_X86_MMU */

	#include <linker/kobject-rom.ld>
	} GROUP_ROM_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

#include <linker/cplusplus-rom.ld>

	MMU_PAGE_ALIGN_PERM
	__rodata_region_end = .;
	__rodata_region_size = __rodata_region_end - __rodata_region_start;
	__rom_region_end = .;

#ifdef CONFIG_USERSPACE
	/* APP SHARED MEMORY REGION */
#define SMEM_PARTITION_ALIGN(size) MMU_PAGE_ALIGN_PERM
#define APP_SHARED_ALIGN  MMU_PAGE_ALIGN_PERM

#include <app_smem.ld>

	_image_ram_start = _app_smem_start;
	_app_smem_size = _app_smem_end - _app_smem_start;
	_app_smem_num_words = _app_smem_size >> 2;
	_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */

/* This should be put here before the BSS section, otherwise the .bss.__gcov
 * sections would be put in the BSS section. That causes gcov to not work
 * properly.
 */
#include <snippets-ram-sections.ld>

	SECTION_PROLOGUE(_BSS_SECTION_NAME, (NOLOAD),)
	{
	. = ALIGN(16);
	MMU_PAGE_ALIGN_PERM
#ifndef CONFIG_USERSPACE
	/* Without userspace there is no app shared memory area, so writable
	 * kernel RAM starts here instead of at _app_smem_start.
	 */
	_image_ram_start = .;
#endif
	__kernel_ram_start = .;
	__bss_start = .;
	*(.bss)
	*(.bss.*)
	*(COMMON)
	. = ALIGN(4);	/* so __bss_num_dwords is exact */
	__bss_end = .;
	} GROUP_NOLOAD_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

	__bss_num_dwords = (__bss_end - __bss_start) >> 2;

#include <linker/common-noinit.ld>

#include <snippets-sections.ld>

	SECTION_PROLOGUE(_DATA_SECTION_NAME,,)
	{
	. = ALIGN(16);
	*(.data)
	*(.data.*)
	#include <snippets-rwdata.ld>
	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)

#include <linker/common-ram.ld>
#include <linker/cplusplus-ram.ld>
#include <arch/x86/pagetables.ld>

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() CMake function.
 */
#include <snippets-data-sections.ld>

/* Must be last in RAM */
#include <linker/kobject-data.ld>
	MMU_PAGE_ALIGN
	_image_ram_end = .;
	z_mapped_end = .;
	_end = .;

	/* All unused memory also owned by the kernel for heaps */
	__kernel_ram_end = KERNEL_BASE_ADDR + KERNEL_RAM_SIZE;
	__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;

	z_mapped_size = z_mapped_end - z_mapped_start;

#include <linker/debug-sections.ld>

	/* PLT/GOT and relocation sections are never used in a statically
	 * linked kernel image; discard them rather than letting them bloat
	 * the binary.
	 */
	/DISCARD/ :
	{
	*(.got)
	*(.got.plt)
	*(.igot)
	*(.igot.plt)
	*(.iplt)
	*(.plt)
	*(.note.GNU-stack)
	*(.rel.*)
	*(.rela.*)
	}
}
220