// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/assembler.h>
#include <asm/nds32.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/current.h>

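/*
 * With hardware zero-overhead loops (CONFIG_HWZOL), the loop registers
 * $LB, $LE and $LC are part of the user context and are restored here
 * from $r14-$r16 of the saved register frame.
 */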
#ifdef CONFIG_HWZOL
	.macro pop_zol
	mtusr	$r14, $LB
	mtusr	$r15, $LE
	mtusr	$r16, $LC
	.endm
#endif

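/*
 * restore_user_regs_first: with interrupts disabled, reload the saved
 * system registers ($SP_USR, $IPC, the ZOL registers, $PSW, $IPSW,
 * $P_IPSW, $P_IPC, $P_P0, $P_P1) from the upper part of the exception
 * frame, then reload $fp, $gp and $lp.
 */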
	.macro	restore_user_regs_first
	setgie.d
	isb

	addi	$sp, $sp, FUCOP_CTL_OFFSET

	lmw.adm $r12, [$sp], $r24, #0x0
	mtsr	$r12, $SP_USR
	mtsr	$r13, $IPC
#ifdef CONFIG_HWZOL
	pop_zol
#endif
	mtsr	$r19, $PSW
	mtsr	$r20, $IPSW
	mtsr	$r21, $P_IPSW
	mtsr	$r22, $P_IPC
	mtsr	$r23, $P_P0
	mtsr	$r24, $P_P1
	lmw.adm $sp, [$sp], $sp, #0xe
	.endm

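/*
 * restore_user_regs_last: pop the stack pointer saved at exception
 * entry ($osp slot); if it is non-zero, switch $sp to it, then return
 * with iret.
 */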
	.macro	restore_user_regs_last
	pop	$p0
	cmovn	$sp, $p0, $p0

	iret
	nop

	.endm

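/*
 * restore_user_regs: full restore used on the slow return path;
 * reloads $r0-$r25 in addition to the system registers.
 */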
	.macro	restore_user_regs
	restore_user_regs_first
	lmw.adm $r0, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET
	restore_user_regs_last
	.endm

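/*
 * fast_restore_user_regs: as above, but $r0 still holds the syscall
 * return value and was never overwritten on the fast path, so the
 * reload starts at $r1 and $sp is adjusted by one word less.
 */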
	.macro	fast_restore_user_regs
	restore_user_regs_first
	lmw.adm $r1, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET-4
	restore_user_regs_last
	.endm

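/*
 * On non-preemptible kernels, preempt_stop disables interrupts on the
 * way out of an exception and resume_kernel simply aliases
 * no_work_pending.  With CONFIG_PREEMPT, preempt_stop is empty and the
 * real resume_kernel below is used.
 */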
#ifdef CONFIG_PREEMPT
	.macro	preempt_stop
	.endm
#else
	.macro	preempt_stop
	setgie.d
	isb
	.endm
#define	resume_kernel	no_work_pending
#endif

ENTRY(ret_from_exception)
	preempt_stop
ENTRY(ret_from_intr)

/*
 * Decide whether we are returning to kernel mode or to user mode.
 */
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! check if in a nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, resume_kernel		! returning to kernel mode
	j	resume_userspace

/*
 * This is the fast syscall return path.  We do as little as possible
 * here; in particular, $r0 is only written back into the saved
 * register frame when extra work is pending.
 * Fixed register usage: tsk - $r25, syscall # - $r7, syscall table
 * pointer - $r8.
 */
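/*
 * Interrupts are disabled before the TIF flags are checked so that no
 * new work can be queued between the check and the register restore.
 */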
ENTRY(ret_fast_syscall)
	gie_disable
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, fast_work_pending
	fast_restore_user_regs			! iret

/*
 * Ok, we need to do extra processing: enter the slow syscall-return
 * path to handle the pending work.
 */
fast_work_pending:
	swi	$r0, [$sp+(#R0_OFFSET)]		! save the return value; this is what differs from ret_from_exception
work_pending:
	andi	$p1, $r1, #_TIF_NEED_RESCHED
	bnez	$p1, work_resched

	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
	beqz	$p1, no_work_pending

	move	$r0, $sp			! 'regs'
	gie_enable
	bal	do_notify_resume
	b	ret_slow_syscall
work_resched:
	bal	schedule			! reschedule, then fall through to the slow return path

/*
 * "slow" syscall return path.
 */
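/*
 * resume_userspace is also the common exit path for exceptions and
 * interrupts that return to user mode; the work flags are re-checked
 * with interrupts disabled before the final register restore.
 */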
ENTRY(resume_userspace)
ENTRY(ret_slow_syscall)
	gie_disable
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! check if in a nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, no_work_pending		! return directly with iret
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, work_pending		! handle rescheduling, signals

no_work_pending:
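	/*
	 * Tell the irq-flags tracer whether interrupts will be enabled
	 * after iret: call __trace_hardirqs_on if the GIE bit of the
	 * saved $IPSW is set, __trace_hardirqs_off otherwise.
	 */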
#ifdef CONFIG_TRACE_IRQFLAGS
	lwi	$p0, [$sp+(#IPSW_OFFSET)]
	andi	$p0, $p0, #0x1
	la	$r10, __trace_hardirqs_off
	la	$r9, __trace_hardirqs_on
	cmovz	$r9, $p0, $r10
	jral	$r9
#endif
	restore_user_regs			! return to user space with iret

/*
 * preemptible kernel: resuming in kernel mode
 */
#ifdef CONFIG_PREEMPT
resume_kernel:
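	/*
	 * Preempt only when the preempt count is zero, a reschedule is
	 * pending, and the interrupted context had interrupts enabled.
	 */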
	gie_disable
	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
	bnez	$t0, no_work_pending
need_resched:
	lwi	$t0, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $t0, #_TIF_NEED_RESCHED
	beqz	$p1, no_work_pending

	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! Interrupts off?
	andi	$t0, $t0, #1
	beqz	$t0, no_work_pending

	jal	preempt_schedule_irq
	b	need_resched
#endif

/*
 * This is how we return from a fork.
 */
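/*
 * $r6 is non-zero only for kernel threads, where it holds the thread
 * function and $r7 its argument; user-space children go straight to
 * the syscall-tracing check and the slow return path.
 */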
ENTRY(ret_from_fork)
	bal	schedule_tail
	beqz	$r6, 1f				! $r6 stores fn for kernel thread
	move	$r0, $r7			! prepare kernel thread arg
	jral	$r6
1:
	lwi	$r1, [tsk+#TSK_TI_FLAGS]		! check for syscall tracing
	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
	beqz	$p1, ret_slow_syscall
	move	$r0, $sp
	bal	syscall_trace_leave
	b	ret_slow_syscall