/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Interrupt management support for IA-32 architecture
 *
 * This module implements assembly routines to manage interrupts on
 * the Intel IA-32 architecture.  More specifically, the interrupt (asynchronous
 * exception) stubs are implemented in this module.  The stubs are invoked when
 * entering and exiting a C interrupt handler.
 */
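/*
 * For reference, typical C-level usage that ends up routed through the stubs
 * in this file might look like the sketch below ('my_isr', 'MY_DEV_IRQ' and
 * 'MY_DEV_PRIO' are hypothetical names, not part of this module):
 *
 *	void my_isr(const void *arg)
 *	{
 *		// service the device; 'arg' is the isr_param from IRQ_CONNECT()
 *	}
 *
 *	IRQ_CONNECT(MY_DEV_IRQ, MY_DEV_PRIO, my_isr, NULL, 0);
 *	irq_enable(MY_DEV_IRQ);
 */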
#define LOAPIC_BASE_ADDRESS DT_REG_ADDR(DT_NODELABEL(intc_loapic))

#include <zephyr/arch/x86/ia32/asm.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/interrupt_controller/sysapic.h>

	/* exports (internal APIs) */

	GTEXT(_interrupt_enter)
	GTEXT(z_SpuriousIntNoErrCodeHandler)
	GTEXT(z_SpuriousIntHandler)
	GTEXT(_irq_sw_handler)
	GTEXT(z_dynamic_stubs_begin)

	/* externs */

	GTEXT(arch_swap)

#ifdef CONFIG_PM
	GTEXT(pm_system_resume)
#endif

/**
 *
 * @brief Inform the kernel of an interrupt
 *
 * This function is called from the interrupt stub created by IRQ_CONNECT()
 * to inform the kernel of an interrupt.  This routine increments
 * _kernel.nested (to support interrupt nesting), switches to the
 * base of the interrupt stack, if not already on the interrupt stack, and then
 * saves the volatile integer registers onto the stack.  Finally, control is
 * returned back to the interrupt stub code (which will then invoke the
 * "application" interrupt service routine).
 *
 * Only the volatile integer registers are saved since ISRs are assumed not to
 * utilize floating point (or SSE) instructions.
 *
 * WARNINGS
 *
 * Host-based tools and the target-based GDB agent depend on the stack frame
 * created by this routine to determine the locations of volatile registers.
 * These tools must be updated to reflect any changes to the stack frame.
 *
 * C function prototype:
 *
 * void _interrupt_enter(void *isr, void *isr_param);
 */
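/*
 * Conceptually, each generated stub pushes its two arguments and transfers
 * control here; a minimal sketch (not the literal IRQ_CONNECT()/gen_idt
 * output) would be:
 *
 *	push	$isr_param
 *	push	$isr
 *	jmp	_interrupt_enter
 *
 * which produces the isr/isr_param stack slots documented below.
 */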
SECTION_FUNC(PINNED_TEXT, _interrupt_enter)
	/*
	 * Note that the processor has pushed both the EFLAGS register
	 * and the logical return address (cs:eip) onto the stack prior
	 * to invoking the handler specified in the IDT. The stack looks
	 * like this:
	 *
	 *  24 SS (only on privilege level change)
	 *  20 ESP (only on privilege level change)
	 *  16 EFLAGS
	 *  12 CS
	 *  8  EIP
	 *  4  isr_param
	 *  0  isr   <-- stack pointer
	 */

	/*
	 * The gen_idt tool creates an interrupt-gate descriptor for
	 * all connections.  The processor will automatically clear the IF
	 * bit in the EFLAGS register upon execution of the handler, hence
	 * there is no need to issue a 'cli' as the first instruction.
	 *
	 * Clear the direction flag.  It is automatically restored when the
	 * interrupt exits via the IRET instruction.
	 */

	cld

#ifdef CONFIG_X86_KPTI
	call z_x86_trampoline_to_kernel
#endif
	/*
	 * Swap EAX with isr_param and EDX with isr.
	 * Push ECX onto the stack
	 */
	xchgl	%eax, 4(%esp)
	xchgl	%edx, (%esp)
	pushl	%ecx

	/* Now the stack looks like:
	 *
	 * EFLAGS
	 * CS
	 * EIP
	 * saved EAX
	 * saved EDX
	 * saved ECX
	 *
	 * EAX = isr_param, EDX = isr
	 */

	/* Push EBP as we will use it for scratch space; it also helps with
	 * stack unwinding.  The rest of the callee-saved registers are saved
	 * by the invoked C functions (ISR handler, arch_swap(), etc.).
	 */
	pushl	%ebp

	/* load %ecx with &_kernel */

	movl	$_kernel, %ecx

	/* switch to the interrupt stack for the non-nested case */

	incl	_kernel_offset_to_nested(%ecx)

	/* use interrupt stack if not nested */
	cmpl	$1, _kernel_offset_to_nested(%ecx)
	jne	alreadyOnIntStack

	/*
	 * switch to base of the interrupt stack: save esp in ebp, then load
	 * irq_stack pointer
	 */

	movl	%esp, %ebp
	movl	_kernel_offset_to_irq_stack(%ecx), %esp


	/* save thread's stack pointer onto base of interrupt stack */

	pushl	%ebp			/* Save stack pointer */

#ifdef CONFIG_PM
	cmpl	$0, _kernel_offset_to_idle(%ecx)
	jne	handle_idle
	/* fast path is !idle, in the pipeline */
#endif /* CONFIG_PM */

	/* fall through to nested case */

alreadyOnIntStack:

	push	%eax	/* interrupt handler argument */

#if defined(CONFIG_TRACING_ISR)
	/* Save these since we use them to keep track of isr and isr_param */
	pushl	%eax
	pushl	%edx
	call	sys_trace_isr_enter
	popl	%edx
	popl	%eax
#endif

#ifdef CONFIG_NESTED_INTERRUPTS
	sti			/* re-enable interrupts */
#endif
	/* Now call the interrupt handler */
	call	*%edx
	/* Discard ISR argument */
	addl	$0x4, %esp
#ifdef CONFIG_NESTED_INTERRUPTS
	cli			/* disable interrupts again */
#endif

#if defined(CONFIG_TRACING_ISR)
	pushl	%eax
	call	sys_trace_isr_exit
	popl	%eax
#endif

#if defined(CONFIG_X86_RUNTIME_IRQ_STATS)
	/*
	 * The runtime_irq_stats() function must be implemented by the
	 * platform when this config option is enabled.
	 */
	pushl	%eax
	call	runtime_irq_stats
	popl	%eax
#endif
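	/*
	 * A plausible shape for that hook, assuming the plain cdecl call
	 * above with no explicit arguments (sketch only, not taken from any
	 * particular platform):
	 *
	 *	void runtime_irq_stats(void);
	 */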

	xorl	%eax, %eax
#if defined(CONFIG_X2APIC)
	xorl	%edx, %edx
	movl	$(X86_X2APIC_BASE_MSR + (LOAPIC_EOI >> 4)), %ecx
	wrmsr
#else /* xAPIC */
#ifdef DEVICE_MMIO_IS_IN_RAM
	movl	Z_TOPLEVEL_RAM_NAME(LOAPIC_REGS_STR), %edx
	movl	%eax, LOAPIC_EOI(%edx)
#else
	movl	%eax, (LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
#endif /* DEVICE_MMIO_IS_IN_RAM */
#endif /* CONFIG_X2APIC */

	/* determine whether exiting from a nested interrupt */
	movl	$_kernel, %ecx
	decl	_kernel_offset_to_nested(%ecx)	/* dec interrupt nest count */
	jne	nestedInterrupt                 /* 'iret' if nested case */

#ifdef CONFIG_PREEMPT_ENABLED
	movl	_kernel_offset_to_current(%ecx), %edx

	/* reschedule only if the scheduler says that we must do so */
	cmpl	%edx, _kernel_offset_to_ready_q_cache(%ecx)
	je	noReschedule

	/*
	 * Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call
	 * to arch_swap() to determine whether non-floating registers need to be
	 * preserved using the lazy save/restore algorithm, or to indicate to
	 * debug tools that a preemptive context switch has occurred.
	 */

#if defined(CONFIG_LAZY_FPU_SHARING)
	orb	$X86_THREAD_FLAG_INT, _thread_offset_to_flags(%edx)
#endif

	/*
	 * A context reschedule is required: keep the volatile registers of
	 * the interrupted thread on the context's stack.  Utilize
	 * the existing arch_swap() primitive to save the remaining
	 * thread's registers (including floating point) and perform
	 * a switch to the new thread.
	 */

	popl	%esp	/* switch back to outgoing thread's stack */

#ifdef CONFIG_STACK_SENTINEL
	call	z_check_stack_sentinel
#endif
	pushfl			/* push KERNEL_LOCK_KEY argument */
	call	arch_swap
	addl	$4, %esp	/* pop KERNEL_LOCK_KEY argument */

	/*
	 * The interrupted thread has now been scheduled,
	 * as the result of a _later_ invocation of arch_swap().
	 *
	 * Now need to restore the interrupted thread's environment before
	 * returning control to it at the point where it was interrupted ...
	 */

#if defined(CONFIG_LAZY_FPU_SHARING)
	/*
	 * arch_swap() has restored the floating point registers, if needed.
	 * Clear X86_THREAD_FLAG_INT in the interrupted thread's state
	 * since it has served its purpose.
	 */

	movl	_kernel + _kernel_offset_to_current, %eax
	andb	$~X86_THREAD_FLAG_INT, _thread_offset_to_flags(%eax)
#endif /* CONFIG_LAZY_FPU_SHARING */

	/* Restore volatile registers and return to the interrupted thread */
	popl	%ebp
	popl	%ecx
	popl	%edx
	popl	%eax

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	KPTI_IRET

#endif /* CONFIG_PREEMPT_ENABLED */

noReschedule:

	/*
	 * A thread reschedule is not required; switch back to the
	 * interrupted thread's stack and restore volatile registers
	 */

	popl	%esp		/* pop thread stack pointer */

#ifdef CONFIG_STACK_SENTINEL
	call	z_check_stack_sentinel
#endif

	/* fall through to 'nestedInterrupt' */


	/*
	 * For the nested interrupt case, the interrupt stack must still be
	 * utilized, and more importantly, a rescheduling decision must
	 * not be performed.
	 */

nestedInterrupt:
	popl	%ebp
	popl	%ecx		/* pop volatile registers in reverse order */
	popl	%edx
	popl	%eax
	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	KPTI_IRET


#ifdef CONFIG_PM
handle_idle:
	pushl	%eax
	pushl	%edx
	/* Zero out _kernel.idle */
	movl	$0, _kernel_offset_to_idle(%ecx)

	/*
	 * Beware that a timer driver's pm_system_resume() implementation might
	 * expect that interrupts are disabled when invoked.  This ensures that
	 * the calculation and programming of the device for the next timer
	 * deadline is not interrupted.
	 */

	call	pm_system_resume
	popl	%edx
	popl	%eax
	jmp	alreadyOnIntStack
#endif /* CONFIG_PM */

/**
 *
 * @brief Spurious interrupt handler stubs
 *
 * Interrupt-gate descriptors are statically created for all slots in the IDT
 * that point to z_SpuriousIntHandler() or z_SpuriousIntNoErrCodeHandler().  The
 * former stub is connected to exception vectors where the processor pushes an
 * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
 * records.
 *
 * A spurious interrupt is considered a fatal condition; there is no provision
 * to return to the interrupted execution context and thus the volatile
 * registers are not saved.
 *
 * @return Never returns
 *
 * C function prototype:
 *
 * void z_SpuriousIntHandler (void);
 *
 * INTERNAL
 * The gen_idt tool creates an interrupt-gate descriptor for all
 * connections.  The processor will automatically clear the IF bit
 * in the EFLAGS register upon execution of the handler,
 * thus z_SpuriousIntNoErrCodeHandler()/z_SpuriousIntHandler() shall be
 * invoked with interrupts disabled.
 */
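/*
 * For reference, the ESF handed to the fatal handler below is laid out as
 * follows (a sketch derived from the push sequence in this file; lowest
 * address, i.e. the pointer passed as the pEsf argument, first):
 *
 *   pre-fault ESP, EBP, EBX, ESI, EDI, EDX, ECX, EAX,
 *   error code (real or dummy 0), EIP, CS, EFLAGS
 */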
SECTION_FUNC(PINNED_TEXT, z_SpuriousIntNoErrCodeHandler)

	pushl	$0			/* push dummy err code onto stk */

	/* fall through to z_SpuriousIntHandler */


SECTION_FUNC(PINNED_TEXT, z_SpuriousIntHandler)

	cld				/* Clear direction flag */

	/* Create the ESF */

	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %edi
	pushl %esi
	pushl %ebx
	pushl %ebp

	leal	44(%esp), %ecx   /* Calculate ESP before exception occurred */
	pushl	%ecx             /* Save calculated ESP */

	pushl	%esp		/* push cur stack pointer: pEsf arg */

	/* re-enable interrupts */
	sti

	/* call the fatal error handler */
	call	z_x86_spurious_irq

	/* handler doesn't return */

#if CONFIG_IRQ_OFFLOAD
SECTION_FUNC(PINNED_TEXT, _irq_sw_handler)
	push $0
	push $z_irq_do_offload
	jmp _interrupt_enter

#endif

#if CONFIG_X86_DYNAMIC_IRQ_STUBS > 0
z_dynamic_irq_stub_common:
	/* stub number already pushed */
	push $z_x86_dynamic_irq_handler
	jmp _interrupt_enter

/* Create all the dynamic IRQ stubs
 *
 * NOTE: Please update DYN_STUB_SIZE in include/arch/x86/ia32/arch.h if you
 * change how large the generated stubs are, otherwise _get_dynamic_stub()
 * will be unable to correctly determine the offset
 */
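/*
 * Illustrative layout of one generated block (a sketch that follows from the
 * directives below, not additional code):
 *
 *   z_dynamic_irq_stub_<N>:   push $<N>   ; 2-byte push imm8
 *                             jmp  1f     ; 2-byte jmp rel8 (omitted for the
 *                                         ; last stub of a block)
 *   ...
 *   1:                        jmp  z_dynamic_irq_stub_common  ; 5-byte jmp rel32
 */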

/*
 * Create nice labels for all the stubs so we can see where we
 * are in a debugger
 */
.altmacro
.macro __INT_STUB_NUM id
z_dynamic_irq_stub_\id:
.endm
.macro INT_STUB_NUM id
__INT_STUB_NUM %id
.endm

z_dynamic_stubs_begin:
stub_num = 0
.rept ((CONFIG_X86_DYNAMIC_IRQ_STUBS + Z_DYN_STUB_PER_BLOCK - 1) / Z_DYN_STUB_PER_BLOCK)
	block_counter = 0
	.rept Z_DYN_STUB_PER_BLOCK
		.if stub_num < CONFIG_X86_DYNAMIC_IRQ_STUBS
			INT_STUB_NUM stub_num
			/*
			 * 2-byte push imm8.
			 */
			push $stub_num

			/*
			 * Check to make sure this isn't the last stub in
			 * a block, in which case we just fall through
			 */
			.if (block_counter <> (Z_DYN_STUB_PER_BLOCK - 1) && \
			     (stub_num <> CONFIG_X86_DYNAMIC_IRQ_STUBS - 1))
				/* This should always be a 2-byte jmp rel8 */
				jmp 1f
			.endif
			stub_num = stub_num + 1
			block_counter = block_counter + 1
		.endif
	.endr
	/*
	 * This must be a 5-byte jmp rel32, which is why z_dynamic_irq_stub_common
	 * is placed before the actual stubs
	 */
1:	jmp z_dynamic_irq_stub_common
.endr
#endif /* CONFIG_X86_DYNAMIC_IRQ_STUBS > 0 */