1/*
2 * Copyright (c) 2011-2015 Wind River Systems, Inc.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7/**
8 * @file
9 * @brief Exception management support for IA-32 architecture
10 *
 * This module implements assembly routines to manage exceptions (synchronous
 * interrupts) on the Intel IA-32 architecture.  More specifically, the
 * exception entry/exit stubs are implemented in this module.  The stubs are
 * invoked when entering and exiting a C exception handler.
15 */
16
17#include <zephyr/arch/x86/ia32/asm.h>
18#include <zephyr/arch/x86/ia32/arch.h> /* For MK_ISR_NAME */
19#include <offsets_short.h>
20
21
22	/* exports (internal APIs) */
23
24	GTEXT(_exception_enter)
25	GTEXT(_kernel_oops_handler)
26
27	/* externs (internal APIs) */
28	GTEXT(z_x86_do_kernel_oops)
29
/**
 *
 * @brief Inform the kernel of an exception
 *
 * This function is called from the exception stub created by nanoCpuExcConnect()
 * to inform the kernel of an exception.  This routine currently does
 * _not_ increment a thread/interrupt specific exception count.  Also,
 * execution of the exception handler occurs on the current stack, i.e.
 * this does not switch to another stack.  The volatile integer
 * registers are saved on the stack, and control is returned back to the
 * exception stub.
 *
 * On entry the top of stack holds the handler address pushed by the stub,
 * then the error code (CPU-supplied, or junk for vectors without one),
 * then the CPU-pushed EIP/CS/EFLAGS (plus ESP/SS when arriving from ring 3).
 *
 * WARNINGS
 *
 * Host-based tools and the target-based GDB agent depend on the stack frame
 * created by this routine to determine the locations of volatile registers.
 * These tools must be updated to reflect any changes to the stack frame.
 *
 * C function prototype:
 *
 * void _exception_enter(uint32_t error_code, void *handler)
 *
 */

SECTION_FUNC(PINNED_TEXT, _exception_enter)

	/*
	 * The gen_idt tool creates an interrupt-gate descriptor for
	 * all connections.  The processor will automatically clear the IF
	 * bit in the EFLAGS register upon execution of the handler, thus
	 * this routine does not need to issue a 'cli' as its first
	 * instruction.
	 *
	 * Note that the processor has pushed both the EFLAGS register
	 * and the linear return address (cs:eip) onto the stack prior
	 * to invoking the handler specified in the IDT.
	 *
	 * Clear the direction flag.  It is automatically restored when the
	 * exception exits, via the EFLAGS image restored by iret.
	 */

	cld

#ifdef CONFIG_X86_KPTI
	/* KPTI: leave the user-visible trampoline context before touching
	 * kernel data; see z_x86_trampoline_to_kernel() for details.
	 */
	call z_x86_trampoline_to_kernel
#endif
	/*
	 * Swap ecx and the handler address on top of the current stack:
	 * this both saves the interrupted context's ECX into the frame and
	 * loads the handler pointer into ECX in a single instruction.
	 */
	xchgl	%ecx, (%esp)

	/* By the time we get here, the stack should look like this:
	 * ESP -> ECX (excepting task)
	 *	  Exception Error code (or junk)
	 *	  EIP (excepting task)
	 *	  CS (excepting task)
	 *	  EFLAGS (excepting task)
	 *	  ...
	 *
	 * ECX now contains the address of the handler function */

	/*
	 * Push the remaining volatile (caller-saved) registers on the
	 * existing stack.
	 */

	pushl	%eax
	pushl	%edx

	/*
	 * Push the cooperative (callee-saved) registers on the existing
	 * stack as they are required by debug tools.
	 */

	pushl	%edi
	pushl	%esi
	pushl	%ebx
	pushl	%ebp

#ifdef CONFIG_USERSPACE
	/* Test if interrupted context was in ring 3: the low two bits of
	 * the saved CS image (36 bytes up: seven 4-byte register saves
	 * above it) hold the code segment's privilege level.
	 */
	testb	$3, 36(%esp)
	jz 1f
	/* It was. The CPU then also pushed the user SS:ESP; the original
	 * stack pointer is on the stack 44 bytes from the current top.
	 */
	pushl	44(%esp)
	jmp 2f
1:
#endif
	leal	44(%esp), %eax   /* Calculate ESP before interrupt occurred */
	pushl	%eax             /* Save calculated ESP */
#ifdef CONFIG_USERSPACE
2:
#endif

#ifdef CONFIG_GDBSTUB
	/* Segment registers are saved in the frame only for the GDB stub */
	pushl %ds
	pushl %es
	pushl %fs
	pushl %gs
	pushl %ss
#endif
	/* ESP is pointing to the ESF at this point */

#if defined(CONFIG_LAZY_FPU_SHARING)

	movl	_kernel + _kernel_offset_to_current, %edx

	/* increment the current thread's exception nesting count */
	incl	_thread_offset_to_excNestCount(%edx)

	/*
	 * Set X86_THREAD_FLAG_EXC in the current thread. This enables
	 * z_swap() to preserve the thread's FP registers (where needed)
	 * if the exception handler causes a context switch. It also
	 * indicates to debug tools that an exception is being handled
	 * in the event of a context switch.
	 */

	orb	$X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%edx)

#endif /* CONFIG_LAZY_FPU_SHARING */

	/*
	 * Restore interrupt enable state, then call the handler.
	 *
	 * Interrupts are enabled only if they were allowed at the time
	 * the exception was triggered -- this protects kernel level code
	 * that mustn't be interrupted.
	 *
	 * Test the IF bit (bit 9, mask 0x200) of the saved EFLAGS and
	 * re-enable interrupts if IF=1.
	 */

	/* ESP is still pointing to the ESF at this point */

	testl	$0x200, __z_arch_esf_t_eflags_OFFSET(%esp)
	je	allDone
	sti

allDone:
	pushl	%esp			/* push z_arch_esf_t * parameter */
	call	*%ecx			/* call exception handler */
	addl	$0x4, %esp		/* pop the z_arch_esf_t * argument */

#if defined(CONFIG_LAZY_FPU_SHARING)

	movl	_kernel + _kernel_offset_to_current, %ecx

	/*
	 * Must lock interrupts to prevent outside interference.
	 * (Using "lock" prefix would be nicer, but this won't work
	 * on platforms that don't respect the CPU's bus lock signal.)
	 */

	cli

	/*
	 * Determine whether exiting from a nested exception.
	 */

	decl	_thread_offset_to_excNestCount(%ecx)

	cmpl	$0, _thread_offset_to_excNestCount(%ecx)
	jne	nestedException

	/*
	 * Clear X86_THREAD_FLAG_EXC in the k_thread of the current execution
	 * context if we are not in a nested exception (ie, when we exit the
	 * outermost exception).
	 */

	andb	$~X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%ecx)

nestedException:
#endif /* CONFIG_LAZY_FPU_SHARING */

#ifdef CONFIG_GDBSTUB
	popl %ss
	popl %gs
	popl %fs
	popl %es
	popl %ds
#endif
	/*
	 * Pop the non-volatile registers from the stack.
	 * Note that debug tools may have altered the saved register values while
	 * the task was stopped, and we want to pick up the altered values.
	 */

	popl	%ebp		/* Discard saved ESP (pop into soon-overwritten reg) */
	popl	%ebp
	popl	%ebx
	popl	%esi
	popl	%edi

	/* restore edx and ecx which are always saved on the stack */

	popl	%edx
	popl	%eax
	popl	%ecx

	addl	$4, %esp	/* "pop" error code */

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	KPTI_IRET
234
/**
 * @brief Entry stub for a kernel oops
 *
 * Fabricates the stack frame _exception_enter expects: a dummy error code
 * (so the frame layout matches vectors where the CPU pushes one) and the
 * address of the C handler z_x86_do_kernel_oops, then tail-jumps into the
 * common exception entry path. NOTE(review): presumably installed in the
 * IDT and reached via a software-triggered vector -- confirm against the
 * IDT setup code.
 */
SECTION_FUNC(PINNED_TEXT, _kernel_oops_handler)
	push $0 /* dummy error code */
	push $z_x86_do_kernel_oops /* C handler consumed by _exception_enter */
	jmp _exception_enter
239