1/*
2 * Copyright (c) 2019-2020 Cobham Gaisler AB
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#include <toolchain.h>
8#include <linker/sections.h>
9#include <offsets_short.h>
10#include <arch/sparc/sparc.h>
11#include "stack_offsets.h"
12
13GTEXT(__sparc_trap_interrupt)
14GTEXT(__sparc_trap_irq_offload)
15
16/*
17 * Interrupt trap handler
18 *
19 * - IU state is saved and restored
20 *
21 * On entry:
22 * %l0: psr (set by trap code)
23 * %l1: pc
24 * %l2: npc
25 * %l3: SPARC interrupt request level (bp_IRL)
26 * %fp: %sp of current register window at trap time
27 *
28 * This module also implements the IRQ offload support. The handling is the
29 * same as for asynchronous maskable interrupts, with the following exceptions:
30 * - Do not re-execute the causing (ta) instruction at trap exit.
31 * - A dedicated interrupt request level (0x8d) is used.
32 * - z_sparc_enter_irq() knows how to interpret this interrupt request level.
33 */
SECTION_SUBSEC_FUNC(TEXT, __sparc_trap_interrupt, __sparc_trap_irq_offload)
	/*
	 * Preparation in the case of synchronous IRQ offload: the trap was
	 * caused by a "ta" instruction, so return behind it instead of
	 * re-executing it (pc := npc, npc := npc + 4), and select the
	 * dedicated interrupt request level 0x8d. Falls through into the
	 * common interrupt path below.
	 */
	mov	%l2, %l1
	add	%l2, 4, %l2
	set	0x8d, %l3

__sparc_trap_interrupt:
	/* %g2, %g3 are used at manual window overflow so save temporarily */
	mov	%g2, %l4
	mov	%g3, %l5

	/*
	 * We may have trapped into the invalid window. If so, make it valid.
	 * (%wim >> CWP) == 1 exactly when this trap window is the invalid
	 * one; the srl uses only the low bits of %l0 (psr), i.e. PSR.CWP.
	 */
	rd	%wim, %g2
	srl	%g2, %l0, %g3
	cmp	%g3, 1
	bne	.Lwodone
	 nop

	/*
	 * Do the window overflow: rotate WIM one step "right" so the
	 * invalid window moves to CWP-1 (mod NWIN).
	 */
	sll	%g2, (CONFIG_SPARC_NWIN-1), %g3
	srl	%g2, 1, %g2
	or	%g2, %g3, %g2

	/* Enter window to save. */
	save
	/*
	 * Install new wim calculated above. Three nops cover the WRWIM
	 * write delay before the registers of this window are used.
	 */
	mov	%g2, %wim
	nop
	nop
	nop
	/* Put registers on the dedicated save area of the ABI stack frame. */
	std	%l0, [%sp + 0x00]
	std	%l2, [%sp + 0x08]
	std	%l4, [%sp + 0x10]
	std	%l6, [%sp + 0x18]
	std	%i0, [%sp + 0x20]
	std	%i2, [%sp + 0x28]
	std	%i4, [%sp + 0x30]
	std	%i6, [%sp + 0x38]
	/* Leave saved window. */
	restore

.Lwodone:
	/*
	 * %l4: %g2 at trap time
	 * %l5: %g3 at trap time
	 *
	 * Save the state of the interrupted task including global registers on
	 * the task stack.
	 *
	 * IMPORTANT: Globals are saved here as well on the task stack, since a
	 * context switch might happen before the context of this interrupted
	 * task is restored.
	 */

	/* Allocate stack for isr context. */
	sub	%fp, ISF_SIZE, %sp
	/*
	 * %fp: %sp of interrupted task
	 * %sp: %sp of interrupted task - ISF_SIZE.
	 *      (fits what we store here)
	 *
	 * Save the interrupted context.
	 */
	std	%l0, [%sp + ISF_PSR_OFFSET]     /* psr pc (std stores %l0+%l1) */
	st	%l2, [%sp + ISF_NPC_OFFSET]     /* npc */
	st	%g1, [%sp + ISF_G1_OFFSET]      /* g1 */
	std	%l4, [%sp + ISF_G2_OFFSET]      /* g2  g3 */
	st	%g4, [%sp + ISF_G4_OFFSET]      /* g4 */
	rd	%y, %g1
	st	%g1, [%sp + ISF_Y_OFFSET]	/* y */

	/* %l5: reference to _kernel */
	set	_kernel, %l5
	/*
	 * Switch to interrupt stack. NOTE: %fp is %i6 of this trap window,
	 * which aliases %o6 (%sp) of the interrupted task's window. Writing
	 * it here also redirects any later flush of that window (e.g. during
	 * a context switch) into the ISF save area just allocated.
	 */
	mov	%sp, %fp
	ld	[%l5 + _kernel_offset_to_irq_stack], %sp

	/* Allocate a full C stack frame */
	sub	%sp, STACK_FRAME_SIZE, %sp
	/*
	 * %fp: %sp of interrupted task - ISF_SIZE.
	 * %sp: irq stack - 96. An ABI frame
	 */

	/*
	 * Prepare new PSR in %l6: raise PIL to mask all maskable interrupts.
	 * Traps themselves are re-enabled by the "wr ... PSR_ET" below.
	 */
	or	%l0, PSR_PIL, %l6

#if defined(CONFIG_FPU)
	/*
	 * We now check if the interrupted context was using the FPU. The
	 * result is stored in register l5 which will either get the value 0
	 * (FPU not used) or PSR_EF (FPU used).
	 *
	 * If the FPU was used by the interrupted context, then we do two
	 * things:
	 * 1. Store FSR to memory. This has the side-effect of completing all
	 *    pending FPU operations.
	 * 2. Disable FPU. Floating point instructions in the ISR will trap.
	 *
	 * The FPU will be enabled again if needed after the ISR has returned.
	 */
	set	PSR_EF, %l5
	andcc	%l0, %l5, %l5
	/* Annulled branch: the FSR store executes only if PSR_EF was set. */
	bne,a	1f
	 st	%fsr, [%sp]
1:
	andn	%l6, %l5, %l6
#endif
	/*
	 * %psr := %l6 xor PSR_ET ("wr" XORs its operands). ET was 0 in the
	 * trapped psr, so this enables traps. Three nops cover the WRPSR
	 * write delay.
	 */
	wr	%l6, PSR_ET, %psr
	nop
	nop
	nop

#ifdef CONFIG_TRACING_ISR
	call	sys_trace_isr_enter
	 nop
#endif

	/* SPARC interrupt request level is the first argument */
	call	z_sparc_enter_irq
	 mov	%l3, %o0

#ifdef CONFIG_TRACING_ISR
	call	sys_trace_isr_exit
	 nop
#endif

	/*
	 * %fp: %sp of interrupted task - ISF_SIZE.
	 * %sp: irq stack - 96. An ABI frame
	 */

#ifdef CONFIG_PREEMPT_ENABLED
	/*
	 * Back on the task stack: allocate an ABI frame (96 bytes) for
	 * calling a C function plus 8 bytes for its output value.
	 */
	sub	%fp, (96+8), %sp
	/*
	 * %fp: %sp of interrupted task - ISF_SIZE.
	 * %sp: %sp of interrupted task - ISF_SIZE - STACK_FRAME_SIZE - 8.
	 */
	call	z_arch_get_next_switch_handle
	 add	%sp, 96, %o0
	/* we get old thread as "return value" on stack */
	ld	[%sp + 96], %o1
	/*
	 * o0: new thread
	 * o1: old thread
	 */
	cmp	%o0, %o1
	beq	.Lno_reschedule
	/*
	 * Delay slot, executed on both paths: release the (96+8) area and
	 * leave a minimal 64-byte window save area on the stack.
	 * z_sparc_context_switch() is a leaf function not using stack.
	 */
	 add	%sp, (96+8-64), %sp

#if defined(CONFIG_FPU_SHARING)
	/* IF PSR_EF at trap time then store the FP context. */
	cmp	%l5, 0
	be	.Lno_fp_context
	 nop

	/*
	 * PSR_EF was 1 at trap time so save the FP registers on stack.
	 * - Set PSR_EF so we can access the FP registers. ("wr" XORs its
	 *   operands; %l6 has EF cleared, %l5 is PSR_EF, so this sets EF.)
	 * - Allocate space for the FP registers above the save area used for
	 *   the z_sparc_context_switch() call.
	 */
	wr	%l6, %l5, %psr
	nop
	nop
	nop

	/* 34 words: %f0..%f31 (32) + %fsr + one pad word for alignment. */
	sub	%sp, 34 * 4, %sp
	std	%f0,  [%sp + 64 + 0x00]
	std	%f2,  [%sp + 64 + 0x08]
	std	%f4,  [%sp + 64 + 0x10]
	std	%f6,  [%sp + 64 + 0x18]
	std	%f8,  [%sp + 64 + 0x20]
	std	%f10, [%sp + 64 + 0x28]
	std	%f12, [%sp + 64 + 0x30]
	std	%f14, [%sp + 64 + 0x38]
	std	%f16, [%sp + 64 + 0x40]
	std	%f18, [%sp + 64 + 0x48]
	std	%f20, [%sp + 64 + 0x50]
	std	%f22, [%sp + 64 + 0x58]
	std	%f24, [%sp + 64 + 0x60]
	std	%f26, [%sp + 64 + 0x68]
	std	%f28, [%sp + 64 + 0x70]
	std	%f30, [%sp + 64 + 0x78]

	/* FSR store in the delay slot completes before the switch. */
	call	z_sparc_context_switch
	 st	%fsr, [%sp + 64 + 0x80]

	/* Switched back in: reload the FP context saved above. */
	ldd	[%sp + 64 + 0x00], %f0
	ldd	[%sp + 64 + 0x08], %f2
	ldd	[%sp + 64 + 0x10], %f4
	ldd	[%sp + 64 + 0x18], %f6
	ldd	[%sp + 64 + 0x20], %f8
	ldd	[%sp + 64 + 0x28], %f10
	ldd	[%sp + 64 + 0x30], %f12
	ldd	[%sp + 64 + 0x38], %f14
	ldd	[%sp + 64 + 0x40], %f16
	ldd	[%sp + 64 + 0x48], %f18
	ldd	[%sp + 64 + 0x50], %f20
	ldd	[%sp + 64 + 0x58], %f22
	ldd	[%sp + 64 + 0x60], %f24
	ldd	[%sp + 64 + 0x68], %f26
	ldd	[%sp + 64 + 0x70], %f28
	ldd	[%sp + 64 + 0x78], %f30
	ld	[%sp + 64 + 0x80], %fsr
	/* Deallocate the FP area in the delay slot. */
	ba      .Lno_reschedule
	 add	%sp, 34 * 4, %sp
.Lno_fp_context:
#endif /* CONFIG_FPU_SHARING */

	call	z_sparc_context_switch
	 nop
.Lno_reschedule:
#endif /* CONFIG_PREEMPT_ENABLED */

	/* Restore the interrupted context. */
	ld	[%fp + ISF_Y_OFFSET], %g1
	wr	%g1, 0, %y

	ldd	[%fp + ISF_PSR_OFFSET], %l0     /* psr, pc */
	ld	[%fp + ISF_NPC_OFFSET], %l2     /* npc */
	/* NOTE: %g1 will be restored later */

	/* %g1 keeps the ISF base; used to access the save area below. */
	mov	%fp, %g1
	ldd	[%fp + ISF_G2_OFFSET], %g2
	ld	[%fp + ISF_G4_OFFSET], %g4
	/* Release the ISF: %fp is again the interrupted task's %sp. */
	add	%fp, ISF_SIZE,  %fp

	/*
	 * Install the PSR we got from the interrupt context. Current PSR.CWP
	 * is preserved. Keep PSR.ET=0 until we do "rett".
	 */
	rd	%psr, %l3
	and	%l3, PSR_CWP, %l3
	andn	%l0, (PSR_CWP | PSR_ET), %l0
	or	%l3, %l0, %l0
	mov	%l0, %psr
	nop
	nop
	nop

	/* Calculate %l6 := (cwp+1) % NWIN */
	rd	%wim, %l3
	set	(CONFIG_SPARC_NWIN), %l7
	add	%l0, 1, %l6
	and	%l6, PSR_CWP, %l6
	cmp	%l6, %l7
	/* Annulled branch: %l6 is zeroed only when it reached NWIN. */
	bge,a	.Lwrapok
	 mov	0, %l6

.Lwrapok:
	/* Determine if we must prepare the return window. */
	/* %l5 := %wim >> (cwp+1) */
	srl	%l3, %l6, %l5
	/* %l5 is 1 if (cwp+1) is an invalid window */
	cmp	%l5, 1
	bne	.Lwudone
	 sub	%l7, 1, %l7             /* %l7 := NWIN - 1 */

	/*
	 * Do the window underflow: rotate WIM one step "left". "wr" XORs its
	 * operands; the two shifted values have disjoint bits, so this acts
	 * as an OR.
	 */
	sll	%l3, 1, %l4
	srl	%l3, %l7, %l5
	wr	%l4, %l5, %wim
	nop
	nop
	nop

	/*
	 * Enter the return window and reload it from the ISF save area
	 * (%g1), which is where a window flush during the ISR spilled it
	 * (the interrupted window's %sp was redirected there via %fp above).
	 */
	restore
	ldd	[%g1 + 0x00], %l0
	ldd	[%g1 + 0x08], %l2
	ldd	[%g1 + 0x10], %l4
	ldd	[%g1 + 0x18], %l6
	ldd	[%g1 + 0x20], %i0
	ldd	[%g1 + 0x28], %i2
	ldd	[%g1 + 0x30], %i4
	ldd	[%g1 + 0x38], %i6
	save

.Lwudone:
	/*
	 * Restore %psr since we may have trashed condition codes. PSR.ET is
	 * still 0.
	 */
	wr	%l0, %psr
	nop
	nop
	nop

	/* restore g1 (base register is overwritten by its own load target) */
	ld	[%g1 + ISF_G1_OFFSET], %g1

	/* Return from trap: rett re-enables traps and increments CWP. */
	jmp	%l1
	 rett	%l2
331