/*
 * Copyright (c) 2010-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
/**
 * @file
 * @brief Crt0 module for the IA-32 boards
 *
 * This module contains the initial code executed by the Zephyr Kernel ELF image
 * after having been loaded into RAM.
 *
 * Note that most addresses (functions and variables) must be in physical
 * address space. Depending on how the page tables are set up, they may or
 * may not be available in virtual address space once the page tables are
 * loaded.
 */

#include <zephyr/arch/x86/ia32/asm.h>
#include <zephyr/arch/x86/msr.h>
#include <kernel_arch_data.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/arch/x86/multiboot.h>
#include <x86_mmu.h>
#include <zephyr/kernel/mm.h>

	/* exports (private APIs) */

	GTEXT(__start)

	/* externs */
	GTEXT(z_prep_c)
	GTEXT(z_bss_zero)
	GTEXT(z_data_copy)

	GDATA(_idt_base_address)
	GDATA(z_interrupt_stacks)
	GDATA(z_x86_idt)
#ifndef CONFIG_GDT_DYNAMIC
	GDATA(_gdt)
#endif


#if defined(CONFIG_X86_SSE)
	GDATA(_sse_mxcsr_default_value)
#endif

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	GTEXT(z_x86_early_tls_update_gdt)
#endif

	GDATA(x86_cpu_boot_arg)

.macro install_page_tables
#ifdef CONFIG_X86_MMU
	/* Enable paging. If virtual memory is enabled, the instruction pointer
	 * is currently at a physical address. There is an identity mapping
	 * for all RAM, plus a virtual mapping of RAM starting at
	 * CONFIG_KERNEL_VM_BASE using the same paging structures.
	 *
	 * Until we enable these page tables, only physical memory addresses
	 * work.
	 */
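	/* CR3 must be loaded with the *physical* address of the top-level
	 * paging structure (the PDPT when PAE is enabled, the page directory
	 * otherwise), hence the K_MEM_PHYS_ADDR() conversion here.
	 */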
	movl	$K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
	movl	%eax, %cr3

#ifdef CONFIG_X86_PAE
	/* Enable PAE */
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4

	/* IA32_EFER NXE bit set */
	movl	$0xC0000080, %ecx
	rdmsr
	orl	$0x800, %eax
	wrmsr
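	/* The rdmsr/wrmsr pair above accesses the MSR indexed by ECX
	 * (0xC0000080 = IA32_EFER) through EDX:EAX; setting bit 11 (NXE)
	 * makes the execute-disable (XD) bit in PAE page table entries
	 * take effect.
	 */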
#else
	/* Enable Page Size Extensions (allowing 4MB pages).
	 * This is ignored if PAE is enabled so no need to do
	 * this above in PAE code.
	 */
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
#endif /* CONFIG_X86_PAE */

	/* Enable paging (CR0.PG, bit 31) / write protect (CR0.WP, bit 16) */
	movl	%cr0, %eax
	orl	$(CR0_PG | CR0_WP), %eax
	movl	%eax, %cr0

#ifdef K_MEM_IS_VM_KERNEL
	/* Jump to a virtual address, which works because the identity and
	 * virtual mappings both point to the same physical addresses.
	 */
	ljmp    $CODE_SEG, $vm_enter
vm_enter:
	/* We are now executing in virtual memory. We'll unmap the identity
	 * mappings later, once we are in the C domain.
	 */
#endif /* K_MEM_IS_VM_KERNEL */

#endif /* CONFIG_X86_MMU */
.endm

SECTION_FUNC(BOOT_TEXT, __start)

#include "../common.S"

	/* Enable write-back caching by clearing the NW and CD bits */
	movl	%cr0, %eax
	andl	$0x9fffffff, %eax
	movl	%eax, %cr0
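	/* CR0.CD is bit 30 and CR0.NW is bit 29, so the 0x9fffffff mask
	 * clears exactly those two bits and leaves the rest of CR0 intact.
	 */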

	/*
	 * Ensure interrupts are disabled.  Interrupts are enabled when
	 * the first context switch occurs.
	 */

	cli

	/*
	 * Although the bootloader sets up an Interrupt Descriptor Table (IDT)
	 * and a Global Descriptor Table (GDT), the specification encourages
	 * booted operating systems to set up their own IDT and GDT.
	 */
#if CONFIG_SET_GDT
	/* load 32-bit operand size GDT */
	lgdt	K_MEM_PHYS_ADDR(_gdt_rom)

	/* If we set our own GDT, update the segment registers as well.
	 */
	movw	$DATA_SEG, %ax	/* data segment selector (entry = 2) */
	movw	%ax, %ds	/* set DS */
	movw	%ax, %es	/* set ES */
	movw	%ax, %ss	/* set SS */
	xorw	%ax, %ax	/* AX = 0 */
	movw	%ax, %fs	/* Zero FS */
	movw	%ax, %gs	/* Zero GS */

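	/* CS cannot be loaded with a mov; a far transfer is required, so
	 * reload it by far-jumping to the next instruction.
	 */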
	ljmp	$CODE_SEG, $K_MEM_PHYS_ADDR(__csSet)	/* set CS = 0x08 */

__csSet:
#endif /* CONFIG_SET_GDT */

#if !defined(CONFIG_FPU)
	/*
	 * Force an #NM exception for floating point instructions
	 * since FP support hasn't been configured
	 */

	movl	%cr0, %eax		/* move CR0 to EAX */
	orl	$0x2e, %eax		/* CR0[NE+TS+EM+MP]=1 */
	movl	%eax, %cr0		/* move EAX to CR0 */
#else
	/*
	 * Permit use of x87 FPU instructions
	 *
	 * Note that all floating point exceptions are masked by default,
	 * and that _no_ handler for x87 FPU exceptions (#MF) is provided.
	 */

	movl	%cr0, %eax		/* move CR0 to EAX */
	orl	$0x22, %eax		/* CR0[NE+MP]=1 */
	andl	$~0xc, %eax		/* CR0[TS+EM]=0 */
	movl	%eax, %cr0		/* move EAX to CR0 */

	fninit				/* set x87 FPU to its default state */

  #if defined(CONFIG_X86_SSE)
	/*
	 * Permit use of SSE instructions
	 *
	 * Note that all SSE exceptions are masked by default,
	 * and that _no_ handler for SSE exceptions (#XM) is provided.
	 */

	movl	%cr4, %eax		/* move CR4 to EAX */
	orl	$0x200, %eax		/* CR4[OSFXSR] = 1 */
	andl	$~0x400, %eax		/* CR4[OSXMMEXCPT] = 0 */
	movl	%eax, %cr4		/* move EAX to CR4 */

	/* initialize SSE control/status reg */
	ldmxcsr K_MEM_PHYS_ADDR(_sse_mxcsr_default_value)

  #endif /* CONFIG_X86_SSE */

#endif /* !CONFIG_FPU */

	/*
	 * Set the stack pointer to the area used for the interrupt stack.
	 * Note this stack is used during the execution of __start() and
	 * z_cstart() until the multi-tasking kernel is initialized.  The
	 * dual-purposing of this area of memory is safe since
	 * interrupts are disabled until the first context switch.
	 *
	 * kernel/init.c enforces that the z_interrupt_stacks pointer and
	 * the ISR stack size are some multiple of ARCH_STACK_PTR_ALIGN, which
	 * is at least 4.
	 */
#ifdef CONFIG_INIT_STACKS
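	/* Paint the interrupt stack with the 0xAAAAAAAA sentinel so unused
	 * stack space can be measured later: rep stosl stores EAX at
	 * ES:[EDI], advancing EDI by 4, ECX times.
	 */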
	movl $0xAAAAAAAA, %eax
	leal K_MEM_PHYS_ADDR(z_interrupt_stacks), %edi
#ifdef CONFIG_X86_STACK_PROTECTION
	addl $4096, %edi	/* skip the 4K guard page preceding the stack */
#endif
	stack_size_dwords = (CONFIG_ISR_STACK_SIZE / 4)
	movl $stack_size_dwords, %ecx
	rep  stosl
#endif

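	/* The stack grows downward on x86, so point ESP just past the end of
	 * the interrupt stack buffer (the base address plus the stack size,
	 * plus the 4K guard page when stack protection is enabled).
	 */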
	movl	$K_MEM_PHYS_ADDR(z_interrupt_stacks), %esp
#ifdef CONFIG_X86_STACK_PROTECTION
	/* In this configuration, all stacks, including IRQ stack, are declared
	 * with a 4K non-present guard page preceding the stack buffer
	 */
	addl	$(CONFIG_ISR_STACK_SIZE + 4096), %esp
#else
	addl	$CONFIG_ISR_STACK_SIZE, %esp
#endif

#ifdef CONFIG_XIP
	/* Copy data from flash to RAM.
	 *
	 * This is a must if CONFIG_GDT_DYNAMIC is enabled,
	 * as _gdt needs to be in RAM.
	 */
	call	z_data_copy
#endif

	/* Note that installing page tables must be done after
	 * z_data_copy() as the page tables are being copied into
	 * RAM there.
	 */
	install_page_tables

#ifdef CONFIG_GDT_DYNAMIC
	/* activate RAM-based Global Descriptor Table (GDT) */
	lgdt	%ds:_gdt
#endif

#if defined(CONFIG_X86_ENABLE_TSS)
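	/* Load the Task Register with the main TSS selector; the CPU fetches
	 * the ring 0 stack (SS0:ESP0) from this TSS when an interrupt or
	 * exception raises the privilege level.
	 */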
	mov $MAIN_TSS, %ax
	ltr %ax
#endif

#ifdef K_MEM_IS_VM_KERNEL
	/* Need to reset the stack pointer to its virtual address after
	 * the page tables are loaded.
	 */

	movl	$z_interrupt_stacks, %esp
#ifdef CONFIG_X86_STACK_PROTECTION
	addl	$(CONFIG_ISR_STACK_SIZE + 4096), %esp
#else
	addl	$CONFIG_ISR_STACK_SIZE, %esp
#endif
#endif /* K_MEM_IS_VM_KERNEL */

#ifdef CONFIG_THREAD_LOCAL_STORAGE
	pushl %esp
	call z_x86_early_tls_update_gdt
	popl %esp
#endif
	/* Clear BSS */
#ifdef CONFIG_LINKER_USE_BOOT_SECTION
	call	z_bss_zero_boot
#endif
#ifdef CONFIG_LINKER_USE_PINNED_SECTION
	call	z_bss_zero_pinned
#endif
#ifdef CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT
	/* Don't clear BSS if the section is not present
	 * in memory at boot, as that would cause page
	 * faults. In that case, zeroing BSS is done later,
	 * once the paging mechanism has been initialized.
	 */
	call	z_bss_zero
#endif

	/* load 32-bit operand size IDT */
	lidt	z_x86_idt

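	/* Fill in the boot argument structure handed to z_prep_c(). The
	 * __x86_boot_arg_t_*_OFFSET symbols are build-time generated field
	 * offsets, which let this assembly code store into the C struct.
	 */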
	movl	$x86_cpu_boot_arg, %ebp
	/* Record the boot type as Multiboot */
	movl	$MULTIBOOT_BOOT_TYPE, \
			__x86_boot_arg_t_boot_type_OFFSET(%ebp)
	/* pointer to multiboot info, or NULL */
	movl	%ebx, __x86_boot_arg_t_arg_OFFSET(%ebp)
	pushl	$x86_cpu_boot_arg
	call	z_prep_c	/* enter kernel; never returns */

#if defined(CONFIG_X86_SSE)

	/* SSE control & status register initial value */

_sse_mxcsr_default_value:
	.long	0x1f80			/* all SSE exceptions clear & masked */

#endif /* CONFIG_X86_SSE */

	/* Interrupt Descriptor Table (IDT) definition */

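	/* lidt expects a 6-byte pseudo descriptor: a 16-bit limit followed
	 * by a 32-bit linear base address, which is exactly what is laid
	 * out below.
	 */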
z_x86_idt:
	.word	(CONFIG_IDT_NUM_VECTORS * 8) - 1 /* limit: size of IDT-1 */

	/*
	 * Physical start address = 0.  When executing natively, this
	 * will be placed at the same location as the interrupt vector table
	 * set up by the BIOS (or GRUB?).
	 */

	/* IDT table start address */
	.long	_idt_base_address


#ifdef CONFIG_SET_GDT
	/*
	 * The following 3 GDT entries implement the so-called "basic
	 * flat model", i.e. a single code segment descriptor and a single
	 * data segment descriptor, giving the kernel access to a continuous,
	 * unsegmented address space.  Both segment descriptors map the entire
	 * linear address space (i.e. 0 to 4 GB-1), thus the segmentation
	 * mechanism will never generate "out of limit memory reference"
	 * exceptions even if physical memory does not reside at the referenced
	 * address.
	 *
	 * The 'A' (accessed) bit in the type field is set for all the
	 * data/code segment descriptors to accommodate placing these entries
	 * in ROM, to prevent the processor from freaking out when it tries
	 * and fails to set it.
	 */

SECTION_VAR(PINNED_RODATA, _gdt_rom)
#ifndef CONFIG_GDT_DYNAMIC
_gdt:
#endif

	/* GDT should be aligned on 8-byte boundary for best processor
	 * performance, see Section 3.5.1 of IA architecture SW developer
	 * manual, Vol 3.
	 */

	.balign 8

	/* Entry 0 (selector=0x0000): The "NULL descriptor". The CPU never
	 * actually looks at this entry, so we stuff the 6-byte pseudo
	 * descriptor here */

	/* Limit on GDT */
	.word K_MEM_PHYS_ADDR(_gdt_rom_end) - K_MEM_PHYS_ADDR(_gdt_rom) - 1
	/* table address: _gdt_rom */
	.long K_MEM_PHYS_ADDR(_gdt_rom)
	.word   0x0000

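	/* Each descriptor below is 8 bytes: limit[15:0], base[15:0],
	 * base[23:16], the access byte, flags plus limit[19:16], and
	 * base[31:24].
	 */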
	/* Entry 1 (selector=0x0008): Code descriptor: DPL0 */

	.word   0xffff		/* limit: xffff */
	.word   0x0000		/* base : xxxx0000 */
	.byte   0x00		/* base : xx00xxxx */
	.byte   0x9b		/* Accessed, Code e/r, Present, DPL0 */
	.byte   0xcf		/* limit: fxxxx, Page Gra, 32bit */
	.byte   0x00		/* base : 00xxxxxx */

	/* Entry 2 (selector=0x0010): Data descriptor: DPL0 */

	.word   0xffff		/* limit: xffff */
	.word   0x0000		/* base : xxxx0000 */
	.byte   0x00		/* base : xx00xxxx */
	.byte   0x93		/* Accessed, Data r/w, Present, DPL0 */
	.byte   0xcf		/* limit: fxxxx, Page Gra, 32bit */
	.byte   0x00		/* base : 00xxxxxx */

_gdt_rom_end:
#endif
