/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Interrupt management support for IA-32 architecture
 *
 * This module implements assembly routines to manage interrupts on
 * the Intel IA-32 architecture.  More specifically, the interrupt (asynchronous
 * exception) stubs are implemented in this module.  The stubs are invoked when
 * entering and exiting a C interrupt handler.
 */
#define LOAPIC_BASE_ADDRESS DT_REG_ADDR(DT_NODELABEL(intc_loapic))

#include <zephyr/arch/x86/ia32/asm.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/interrupt_controller/sysapic.h>

	/* exports (internal APIs) */

	GTEXT(_interrupt_enter)
	GTEXT(z_SpuriousIntNoErrCodeHandler)
	GTEXT(z_SpuriousIntHandler)
	GTEXT(_irq_sw_handler)
	GTEXT(z_dynamic_stubs_begin)

	/* externs */

	GTEXT(arch_swap)

#ifdef CONFIG_PM
	GTEXT(z_pm_save_idle_exit)
#endif

/**
 *
 * @brief Inform the kernel of an interrupt
 *
 * This function is called from the interrupt stub created by IRQ_CONNECT()
 * to inform the kernel of an interrupt.  This routine increments
 * _kernel.nested (to support interrupt nesting), switches to the
 * base of the interrupt stack, if not already on the interrupt stack, and then
 * saves the volatile integer registers onto the stack.  Finally, control is
 * returned back to the interrupt stub code (which will then invoke the
 * "application" interrupt service routine).
 *
 * Only the volatile integer registers are saved since ISRs are assumed not to
 * utilize floating point (or SSE) instructions.
 *
 * WARNINGS
 *
 * Host-based tools and the target-based GDB agent depend on the stack frame
 * created by this routine to determine the locations of volatile registers.
 * These tools must be updated to reflect any changes to the stack frame.
 *
 * C function prototype:
 *
 * void _interrupt_enter(void *isr, void *isr_param);
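 *
 * As an illustrative sketch (not necessarily the literal generated code),
 * the connection stub is expected to push its two arguments and jump here,
 * mirroring the software-interrupt stub later in this file:
 *
 *     pushl $isr_param
 *     pushl $isr
 *     jmp   _interrupt_enter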
 */
SECTION_FUNC(PINNED_TEXT, _interrupt_enter)
	/*
	 * Note that the processor has pushed both the EFLAGS register
	 * and the logical return address (cs:eip) onto the stack prior
	 * to invoking the handler specified in the IDT. The stack looks
	 * like this:
	 *
	 *  24 SS (only on privilege level change)
	 *  20 ESP (only on privilege level change)
	 *  16 EFLAGS
	 *  12 CS
	 *  8  EIP
	 *  4  isr_param
	 *  0  isr   <-- stack pointer
	 */

	/*
	 * The gen_idt tool creates an interrupt-gate descriptor for
	 * all connections.  The processor will automatically clear the IF
	 * bit in the EFLAGS register upon execution of the handler, hence
	 * this stub need not issue a 'cli' as the first instruction.
	 *
	 * Clear the direction flag.  It is automatically restored when the
	 * interrupt exits via the IRET instruction.
	 */

	cld

#ifdef CONFIG_X86_KPTI
	call z_x86_trampoline_to_kernel
#endif
	/*
	 * Swap EAX with isr_param and EDX with isr.
	 * Push ECX onto the stack
	 */
	xchgl	%eax, 4(%esp)
	xchgl	%edx, (%esp)
	pushl	%ecx

	/* Now the stack looks like:
	 *
	 * EFLAGS
	 * CS
	 * EIP
	 * saved EAX
	 * saved EDX
	 * saved ECX
	 *
	 * EAX = isr_param, EDX = isr
	 */

	/* Push EDI as we will use it for scratch space.
	 * Rest of the callee-saved regs get saved by invocation of C
	 * functions (isr handler, arch_swap(), etc)
	 */
	pushl	%edi

	/* load %ecx with &_kernel */

	movl	$_kernel, %ecx

	/* switch to the interrupt stack for the non-nested case */

	incl	_kernel_offset_to_nested(%ecx)

	/* use interrupt stack if not nested */
	cmpl	$1, _kernel_offset_to_nested(%ecx)
	jne	alreadyOnIntStack

	/*
	 * switch to base of the interrupt stack: save esp in edi, then load
	 * irq_stack pointer
	 */

	movl	%esp, %edi
	movl	_kernel_offset_to_irq_stack(%ecx), %esp

	/* save thread's stack pointer onto base of interrupt stack */

	pushl	%edi			/* Save stack pointer */

#ifdef CONFIG_PM
	cmpl	$0, _kernel_offset_to_idle(%ecx)
	jne	handle_idle
	/* fast path is !idle, in the pipeline */
#endif /* CONFIG_PM */

	/* fall through to nested case */

alreadyOnIntStack:

	push	%eax	/* interrupt handler argument */

#if defined(CONFIG_TRACING_ISR)
	/* Save these since we are using them to keep track of isr and isr_param */
	pushl	%eax
	pushl	%edx
	call	sys_trace_isr_enter
	popl	%edx
	popl	%eax
#endif

#ifdef CONFIG_NESTED_INTERRUPTS
	sti			/* re-enable interrupts */
#endif
	/* Now call the interrupt handler */
	call	*%edx
	/* Discard ISR argument */
	addl	$0x4, %esp
#ifdef CONFIG_NESTED_INTERRUPTS
	cli			/* disable interrupts again */
#endif

#if defined(CONFIG_TRACING_ISR)
	pushl	%eax
	call	sys_trace_isr_exit
	popl	%eax
#endif

#if defined(CONFIG_X86_RUNTIME_IRQ_STATS)
	/*
	 * The runtime_irq_stats() function should be implemented by the
	 * platform when this config option is enabled.
	 */
	pushl	%eax
	call	runtime_irq_stats
	popl	%eax
#endif

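	/*
	 * Signal end-of-interrupt by writing zero to the local APIC's EOI
	 * register: through the x2APIC MSR interface when CONFIG_X2APIC is
	 * enabled, otherwise through the memory-mapped xAPIC register.
	 */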
	xorl	%eax, %eax
#if defined(CONFIG_X2APIC)
	xorl	%edx, %edx
	movl	$(X86_X2APIC_BASE_MSR + (LOAPIC_EOI >> 4)), %ecx
	wrmsr
#else /* xAPIC */
#ifdef DEVICE_MMIO_IS_IN_RAM
	movl	Z_TOPLEVEL_RAM_NAME(LOAPIC_REGS_STR), %edx
	movl	%eax, LOAPIC_EOI(%edx)
#else
	movl	%eax, (LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
#endif /* DEVICE_MMIO_IS_IN_RAM */
#endif /* CONFIG_X2APIC */

	/* determine whether exiting from a nested interrupt */
	movl	$_kernel, %ecx
	decl	_kernel_offset_to_nested(%ecx)	/* dec interrupt nest count */
	jne	nestedInterrupt                 /* 'iret' if nested case */

#ifdef CONFIG_PREEMPT_ENABLED
	movl	_kernel_offset_to_current(%ecx), %edx

	/* reschedule only if the scheduler says that we must do so */
	cmpl	%edx, _kernel_offset_to_ready_q_cache(%ecx)
	je	noReschedule

	/*
	 * Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call
	 * to arch_swap() to determine whether non-floating registers need to be
	 * preserved using the lazy save/restore algorithm, or to indicate to
	 * debug tools that a preemptive context switch has occurred.
	 */

#if defined(CONFIG_LAZY_FPU_SHARING)
	orb	$X86_THREAD_FLAG_INT, _thread_offset_to_flags(%edx)
#endif

	/*
	 * A context reschedule is required: keep the volatile registers of
	 * the interrupted thread on the context's stack.  Utilize the
	 * existing arch_swap() primitive to save the thread's remaining
	 * registers (including floating point) and perform a switch to the
	 * new thread.
	 */

	popl	%esp	/* switch back to outgoing thread's stack */

#ifdef CONFIG_STACK_SENTINEL
	call	z_check_stack_sentinel
#endif
	pushfl			/* push KERNEL_LOCK_KEY argument */
	call	arch_swap
	addl	$4, %esp	/* pop KERNEL_LOCK_KEY argument */

	/*
	 * The interrupted thread has now been scheduled,
	 * as the result of a _later_ invocation of arch_swap().
	 *
	 * We now need to restore the interrupted thread's environment before
	 * returning control to it at the point where it was interrupted ...
	 */

#if defined(CONFIG_LAZY_FPU_SHARING)
	/*
	 * arch_swap() has restored the floating point registers, if needed.
	 * Clear X86_THREAD_FLAG_INT in the interrupted thread's state
	 * since it has served its purpose.
	 */

	movl	_kernel + _kernel_offset_to_current, %eax
	andb	$~X86_THREAD_FLAG_INT, _thread_offset_to_flags(%eax)
#endif /* CONFIG_LAZY_FPU_SHARING */

	/* Restore volatile registers and return to the interrupted thread */
	popl	%edi
	popl	%ecx
	popl	%edx
	popl	%eax

	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	KPTI_IRET

#endif /* CONFIG_PREEMPT_ENABLED */

noReschedule:

	/*
	 * A thread reschedule is not required; switch back to the
	 * interrupted thread's stack and restore volatile registers
	 */

	popl	%esp		/* pop thread stack pointer */

#ifdef CONFIG_STACK_SENTINEL
	call	z_check_stack_sentinel
#endif

	/* fall through to 'nestedInterrupt' */

	/*
	 * For the nested interrupt case, the interrupt stack must still be
	 * utilized, and more importantly, a rescheduling decision must
	 * not be performed.
	 */

nestedInterrupt:
	popl	%edi
	popl	%ecx		/* pop volatile registers in reverse order */
	popl	%edx
	popl	%eax
	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
	KPTI_IRET

#ifdef CONFIG_PM
handle_idle:
	pushl	%eax
	pushl	%edx
	/* Zero out _kernel.idle */
	movl	$0, _kernel_offset_to_idle(%ecx)

	/*
	 * Beware that a timer driver's z_pm_save_idle_exit() implementation might
	 * expect that interrupts are disabled when invoked.  This ensures that
	 * the calculation and programming of the device for the next timer
	 * deadline is not interrupted.
	 */

	call	z_pm_save_idle_exit
	popl	%edx
	popl	%eax
	jmp	alreadyOnIntStack
#endif /* CONFIG_PM */

/**
 *
 * @brief Spurious interrupt handler stubs
 *
 * Interrupt-gate descriptors are statically created for all slots in the IDT
 * that point to z_SpuriousIntHandler() or z_SpuriousIntNoErrCodeHandler().  The
 * former stub is connected to exception vectors where the processor pushes an
 * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
 * records.
 *
 * A spurious interrupt is considered a fatal condition; there is no provision
 * to return to the interrupted execution context and thus the volatile
 * registers are not saved.
 *
 * @return Never returns
 *
 * C function prototype:
 *
 * void z_SpuriousIntHandler (void);
 *
 * INTERNAL
 * The gen_idt tool creates an interrupt-gate descriptor for all
 * connections.  The processor will automatically clear the IF bit
 * in the EFLAGS register upon execution of the handler,
 * thus z_SpuriousIntNoErrCodeHandler()/z_SpuriousIntHandler() will be
 * invoked with interrupts disabled.
 */
SECTION_FUNC(PINNED_TEXT, z_SpuriousIntNoErrCodeHandler)

	pushl	$0			/* push dummy err code onto stack */

	/* fall through to z_SpuriousIntHandler */

SECTION_FUNC(PINNED_TEXT, z_SpuriousIntHandler)

	cld				/* Clear direction flag */

	/* Create the ESF */

	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %edi
	pushl %esi
	pushl %ebx
	pushl %ebp

	leal	44(%esp), %ecx   /* Calculate ESP before exception occurred */
	pushl	%ecx             /* Save calculated ESP */
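
	/*
	 * The ESF built above now holds, from the most recently pushed value
	 * to the oldest: the ESP value before the exception, EBP, EBX, ESI,
	 * EDI, EDX, ECX, EAX, the (possibly dummy) error code, and the
	 * EIP/CS/EFLAGS records pushed by the processor.
	 */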

	pushl	%esp		/* push cur stack pointer: pEsf arg */

	/* re-enable interrupts */
	sti

	/* call the fatal error handler */
	call	z_x86_spurious_irq

	/* handler doesn't return */

#if CONFIG_IRQ_OFFLOAD
SECTION_FUNC(PINNED_TEXT, _irq_sw_handler)
	push $0
	push $z_irq_do_offload
	jmp _interrupt_enter

#endif

#if CONFIG_X86_DYNAMIC_IRQ_STUBS > 0
z_dynamic_irq_stub_common:
	/* stub number already pushed */
	push $z_x86_dynamic_irq_handler
	jmp _interrupt_enter

/* Create all the dynamic IRQ stubs
 *
 * NOTE: Please update DYN_STUB_SIZE in include/arch/x86/ia32/arch.h if you
 * change how large the generated stubs are, otherwise _get_dynamic_stub()
 * will be unable to correctly determine the offset
 */
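
/*
 * For reference (derived from the stub generation below): each stub is a
 * 2-byte "push imm8" of its stub number followed by a 2-byte "jmp rel8" to
 * the end of its block, except for the last stub of a block, which falls
 * through to the block's shared 5-byte "jmp rel32" into
 * z_dynamic_irq_stub_common.
 */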

/*
 * Create nice labels for all the stubs so we can see where we
 * are in a debugger
 */
.altmacro
.macro __INT_STUB_NUM id
z_dynamic_irq_stub_\id:
.endm
.macro INT_STUB_NUM id
__INT_STUB_NUM %id
.endm

z_dynamic_stubs_begin:
stub_num = 0
.rept ((CONFIG_X86_DYNAMIC_IRQ_STUBS + Z_DYN_STUB_PER_BLOCK - 1) / Z_DYN_STUB_PER_BLOCK)
	block_counter = 0
	.rept Z_DYN_STUB_PER_BLOCK
		.if stub_num < CONFIG_X86_DYNAMIC_IRQ_STUBS
			INT_STUB_NUM stub_num
			/*
			 * 2-byte push imm8.
			 */
			push $stub_num

			/*
			 * Check to make sure this isn't the last stub in
			 * a block, in which case we just fall through
			 */
			.if (block_counter <> (Z_DYN_STUB_PER_BLOCK - 1) && \
			     (stub_num <> CONFIG_X86_DYNAMIC_IRQ_STUBS - 1))
				/* This should always be a 2-byte jmp rel8 */
				jmp 1f
			.endif
			stub_num = stub_num + 1
			block_counter = block_counter + 1
		.endif
	.endr
	/*
	 * This must be a 5-byte jmp rel32, which is why z_dynamic_irq_stub_common
	 * is before the actual stubs
	 */
1:	jmp z_dynamic_irq_stub_common
.endr
#endif /* CONFIG_X86_DYNAMIC_IRQ_STUBS > 0 */