/* SPDX-License-Identifier: GPL-2.0
 *
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>

! NOTE:
! GNU as (as of 2.9.1) rewrites bf/s into bt/s plus bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens after every
 * timer interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *    jmp	@k0	    ! control-transfer instruction
 *     ldc	k1, ssr     ! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 * 	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h.
 *
 *	r0
 *      ...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
/* Offsets to the stack */
OFF_R0  =  0		/* Return value. New ABI also arg4 */
OFF_R1  =  4     	/* New ABI: arg5 */
OFF_R2  =  8     	/* New ABI: arg6 */
OFF_R3  =  12     	/* New ABI: syscall_nr */
OFF_R4  =  16     	/* New ABI: arg0 */
OFF_R5  =  20     	/* New ABI: arg1 */
OFF_R6  =  24     	/* New ABI: arg2 */
OFF_R7  =  28     	/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC  =  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)
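/*
 * The last four offsets follow directly from the stack layout above:
 * OFF_SP is the r15 slot (15*4), OFF_PC is spc (right after r0..r15),
 * OFF_SR skips spc and pr (16*4 + 8), and OFF_TRA sits past the six
 * words spc/pr/ssr/gbr/mach/macl (16*4 + 6*4).
 */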

#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */
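/*
 * Note: the exception and interrupt handlers below run with SR.RB=1, so
 * while they execute, r0-r7 (and therefore the k0-k4, g_imask and current
 * aliases) actually name the bank 1 copies of those registers.
 */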

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violates the protection.
! It can be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, raise an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version relies heavily on the C implementation.
!

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_handle_tlbmiss
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_WRITE, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_handle_tlbmiss
	 mov	#FAULT_CODE_INITIAL, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_do_page_fault
	 mov	#FAULT_CODE_PROT, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_do_page_fault
	 mov	#(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5

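! The C helpers below are entered with the standard SH C calling
! convention: r4 = pt_regs on the kernel stack, r5 = FAULT_CODE_* bits,
! r6 = faulting address read from MMU_TEA.  If handle_tlbmiss() returns
! non-zero we fall through to the full do_page_fault() path.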
call_handle_tlbmiss:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:
	mov	r8, r5
call_do_page_fault:
	mov.l	1f, r0
	mov.l	@r0, r6

	mov.l	3f, r0
	mov.l	4f, r1
	mov	r15, r4
	jmp	@r0
	 lds	r1, pr

	.align 2
1:	.long	MMU_TEA
2:	.long	handle_tlbmiss
3:	.long	do_page_fault
4:	.long	ret_from_exception

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long   do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
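	! r8 = 0x300000f0 below sets RB and BL and raises IMASK to 15 while
	! restore_regs unwinds the frame; control then jumps to the address
	! stored at gdb_vbr_vector with the original SSR in place.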
ENTRY(sh_bios_handler)
	mov.l	1f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	mov	k4, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k3, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).
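! Callers pass r8 = 0x30000000 (restore_all: set RB and BL) or
! r8 = 0x300000f0 (sh_bios_handler: additionally raise IMASK to 15).
! The bank switch with BL set presumably keeps the freshly restored
! bank 0 copies of r0-r7 (and SPC/SSR) intact while k2-k4 are still
! being used as scratch.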

ENTRY(restore_regs)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r9
	or	r8, r9
	ldc	r9, sr
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	mov.l	@r15+, k2		! original PR
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	rts
	 add	#4, r15			! Skip syscall number

restore_all:
	mov.l	7f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	!
	! Calculate new SR value
	mov	k3, k2			! original SR value
	mov	#0xfffffff0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2			! Mask original SR value
	!
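	! If the interrupted context already ran with IMASK=15, keep that;
	! otherwise restore the global interrupt mask from g_imask.  That is
	! roughly what the compare-and-branch below implements.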
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned on a page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! 0x100: General exception vector
!
	.balign 	256,0,256
general_exception:
	bra	handle_exception
	 sts	pr, k3		! save original pr value in k3

! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed
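! (Roughly: with CONFIG_GUSA, a user-space atomic sequence keeps the
! address just past its end in r0, a small negative value in r15 and the
! saved stack pointer in r1.  If the saved PC is still inside such a
! sequence, the rollback below restarts it from its start and restores
! SP from r1.)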

prepare_stack:
#ifdef CONFIG_GUSA
	! Check for roll back gRB (User and Kernel)
	mov	r15, k0
	shll	k0
	bf/s	1f
	 shll	k0
	bf/s	1f
	 stc	spc, k1
	stc	r0_bank, k0
	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
	bt/s	2f
	 stc	r1_bank, k1

	add	#-2, k0
	add	r15, k0
	ldc	k0, spc		! PC = saved r0 + r15 - 2
2:	mov	k1, r15		! SP = r1
1:
#endif
	! Switch to kernel stack if needed
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
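	! mov #imm only takes an 8-bit signed immediate, so THREAD_SIZE is
	! rebuilt below as (THREAD_SIZE >> 10) << 10 before being added to
	! the current thread_info pointer to get the top of the kernel stack.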
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:
	rts
	 nop

!
! 0x400: Instruction and Data TLB miss exception vector
!
	.balign 	1024,0,1024
tlb_miss:
	sts	pr, k3		! save original pr value in k3

handle_exception:
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	5f, k2		! vector register address
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov.l	@k2, k2		! read out vector and keep in k2

handle_exception_special:
	setup_frame_reg

	! Setup return address and jump to exception handler
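	! EXPEVT codes are multiples of 0x20, so shifting the vector right
	! by 3 turns it into a byte offset into exception_handling_table,
	! whose entries are 4-byte handler pointers.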
	mov.l	7f, r9		! fetch return address
	stc	r2_bank, r0	! k2 (vector)
	mov.l	6f, r10
	shlr2	r0
	shlr	r0
	mov.l	@(r0, r10), r10
	jmp	@r10
	 lds	r9, pr		! put return address in pr

	.align	L1_CACHE_SHIFT

! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.
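! k3 is reloaded below from literal 0: (set FD and IMASK=15) and callers
! pass k4 = literal 1: (clear RB and BL), so save_low_regs ends up on
! register bank 0 with exceptions unblocked and interrupts masked via IMASK.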

ENTRY(save_regs)
	mov	#-1, r1
	mov.l	k1, @-r15	! set TRA (default: -1)
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	mov.l	k3, @-r15	! original pr in k3
	stc.l	spc, @-r15

	mov.l	k0, @-r15	! original stack pointer in k0
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15

	mov.l	0f, k3		! SR bits to set in k3

	! fall-through

! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR

ENTRY(save_low_regs)
	stc	sr, r8
	or	k3, r8
	and	k4, r8
	ldc	r8, sr

	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	 mov.l	r0, @-r15

!
! 0x600: Interrupt / NMI vector
!
	.balign 	512,0,512
ENTRY(handle_interrupt)
	sts	pr, k3		! save original pr value in k3
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

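	! Presumably: save_regs raised IMASK to 15, so interrupts are now
	! masked; the check below only invokes TRACE_IRQS_OFF when IMASK
	! really is 15.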
	stc	sr, r0	! get status register
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bf	9f
	TRACE_IRQS_OFF
9:

	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr
	mov.l	2f, r4
	mov.l	3f, r9
	mov.l	@r4, r4		! pass INTEVT vector as arg0

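	! evt2irq-style mapping: IRQ = (INTEVT >> 5) - 16, so INTEVT 0x200
	! becomes IRQ 0.  Codes below 0x200 (notably the NMI at 0x1c0 on
	! SH-3/4) go negative and take the exception-dispatch path instead.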
	shlr2	r4
	shlr	r4
	mov	r4, r0		! save vector->jmp table offset for later

	shlr2	r4		! vector to IRQ# conversion
	add	#-0x10, r4

	cmp/pz	r4		! is it a valid IRQ?
	bt	10f

	/*
	 * We got here as a result of taking the INTEVT path for something
	 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
	 * path and special case the event dispatch instead.  This is the
	 * expected path for the NMI (and any other brilliantly implemented
	 * exception), which effectively wants regular exception dispatch
	 * but is unfortunately reported through INTEVT rather than
	 * EXPEVT.  Grr.
	 */
	mov.l	6f, r9
	mov.l	@(r0, r9), r9
	jmp	@r9
	 mov	r15, r8		! trap handlers take saved regs in r8

10:
	jmp	@r9		! Off to do_IRQ() we go.
	 mov	r15, r5		! pass saved registers as arg1

ENTRY(exception_none)
	rts
	 nop
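! exception_data below is cache-line aligned and shared by both
! handle_exception and handle_interrupt; the PREF(k0) in each path
! prefetches this literal pool via the mova above it.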

	.align	L1_CACHE_SHIFT
exception_data:
0:	.long	0x000080f0	! FD=1, IMASK=15
1:	.long	0xcfffffff	! RB=0, BL=0
2:	.long	INTEVT
3:	.long	do_IRQ
4:	.long	ret_from_irq
5:	.long	EXPEVT
6:	.long	exception_handling_table
7:	.long	ret_from_exception