/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/kernel/crunch-bits.S
 * Cirrus MaverickCrunch context switching and handling
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
 * Copyright (c) 2003-2004, MontaVista Software, Inc.
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <mach/ep93xx-regs.h>

/*
 * We can't use hex constants here due to a bug in gas.
 */
#define CRUNCH_MVDX0		0
#define CRUNCH_MVDX1		8
#define CRUNCH_MVDX2		16
#define CRUNCH_MVDX3		24
#define CRUNCH_MVDX4		32
#define CRUNCH_MVDX5		40
#define CRUNCH_MVDX6		48
#define CRUNCH_MVDX7		56
#define CRUNCH_MVDX8		64
#define CRUNCH_MVDX9		72
#define CRUNCH_MVDX10		80
#define CRUNCH_MVDX11		88
#define CRUNCH_MVDX12		96
#define CRUNCH_MVDX13		104
#define CRUNCH_MVDX14		112
#define CRUNCH_MVDX15		120
#define CRUNCH_MVAX0L		128
#define CRUNCH_MVAX0M		132
#define CRUNCH_MVAX0H		136
#define CRUNCH_MVAX1L		140
#define CRUNCH_MVAX1M		144
#define CRUNCH_MVAX1H		148
#define CRUNCH_MVAX2L		152
#define CRUNCH_MVAX2M		156
#define CRUNCH_MVAX2H		160
#define CRUNCH_MVAX3L		164
#define CRUNCH_MVAX3M		168
#define CRUNCH_MVAX3H		172
#define CRUNCH_DSPSC		176

#define CRUNCH_SIZE		184
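
/*
 * CRUNCH_SIZE adds up as 16 x 8 bytes of MVDX registers (128), 12 x 4
 * bytes of accumulator words (48) and 8 bytes for DSPSC, which is saved
 * with a 64-bit store: 128 + 48 + 8 = 184.
 */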

	.text

/*
 * Lazy switching of crunch coprocessor context
 *
 * r10 = struct thread_info pointer
 * r9  = ret_from_exception
 * lr  = undefined instr exit
 *
 * called from prefetch exception handler with interrupts enabled
 */
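/*
 * If crunch access is already enabled when we get here, the fault was not
 * ours and we bail out at 2: below.  Otherwise we unlock the syscon swlock,
 * enable access, save the previous owner's state (if any), load this
 * task's state and back the pc up so the trapping instruction is retried.
 */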
ENTRY(crunch_task_enable)
	inc_preempt_count r10, r3

	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr

	ldr	r1, [r8, #0x80]
	tst	r1, #0x00800000			@ access to crunch enabled?
	bne	2f				@ if so no business here
	mov	r3, #0xaa			@ unlock syscon swlock
	str	r3, [r8, #0xc0]
	orr	r1, r1, #0x00800000		@ enable access to crunch
	str	r1, [r8, #0x80]

	ldr	r3, =crunch_owner
	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r2, [sp, #60]			@ current task pc value
	ldr	r1, [r3]			@ get current crunch owner
	str	r0, [r3]			@ this task now owns crunch
	sub	r2, r2, #4			@ adjust pc back
	str	r2, [sp, #60]

	ldr	r2, [r8, #0x80]
	mov	r2, r2				@ flush out enable (@@@)

	teq	r1, #0				@ test for last ownership
	mov	lr, r9				@ normal exit from exception
	beq	crunch_load			@ no owner, skip save

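/*
 * crunch_save dumps the live crunch registers to the save area at [r1].
 * r0 is the save area to load afterwards, or 0 if there is nothing to
 * load; in the latter case mvdx0, which was used as scratch above, is
 * reloaded from the save area before taking the exit path.
 */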
crunch_save:
	cfstr64		mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
	cfstr64		mvdx1, [r1, #CRUNCH_MVDX1]
	cfstr64		mvdx2, [r1, #CRUNCH_MVDX2]
	cfstr64		mvdx3, [r1, #CRUNCH_MVDX3]
	cfstr64		mvdx4, [r1, #CRUNCH_MVDX4]
	cfstr64		mvdx5, [r1, #CRUNCH_MVDX5]
	cfstr64		mvdx6, [r1, #CRUNCH_MVDX6]
	cfstr64		mvdx7, [r1, #CRUNCH_MVDX7]
	cfstr64		mvdx8, [r1, #CRUNCH_MVDX8]
	cfstr64		mvdx9, [r1, #CRUNCH_MVDX9]
	cfstr64		mvdx10, [r1, #CRUNCH_MVDX10]
	cfstr64		mvdx11, [r1, #CRUNCH_MVDX11]
	cfstr64		mvdx12, [r1, #CRUNCH_MVDX12]
	cfstr64		mvdx13, [r1, #CRUNCH_MVDX13]
	cfstr64		mvdx14, [r1, #CRUNCH_MVDX14]
	cfstr64		mvdx15, [r1, #CRUNCH_MVDX15]

#ifdef __ARMEB__
#error fix me for ARMEB
#endif

	cfmv32al	mvfx0, mvax0			@ save 72b accumulators
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0L]
	cfmv32am	mvfx0, mvax0
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0M]
	cfmv32ah	mvfx0, mvax0
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0H]
	cfmv32al	mvfx0, mvax1
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1L]
	cfmv32am	mvfx0, mvax1
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1M]
	cfmv32ah	mvfx0, mvax1
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1H]
	cfmv32al	mvfx0, mvax2
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2L]
	cfmv32am	mvfx0, mvax2
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2M]
	cfmv32ah	mvfx0, mvax2
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2H]
	cfmv32al	mvfx0, mvax3
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3L]
	cfmv32am	mvfx0, mvax3
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3M]
	cfmv32ah	mvfx0, mvax3
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3H]

	cfmv32sc	mvdx0, dspsc			@ save status word
	cfstr64		mvdx0, [r1, #CRUNCH_DSPSC]

	teq		r0, #0				@ anything to load?
	cfldr64eq	mvdx0, [r1, #CRUNCH_MVDX0]	@ mvdx0 was clobbered
	beq		1f

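/*
 * crunch_load reloads the complete crunch state from the save area at [r0].
 */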
crunch_load:
	cfldr64		mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
	cfmvsc32	dspsc, mvdx0

	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
	cfmval32	mvax0, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0M]
	cfmvam32	mvax0, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0H]
	cfmvah32	mvax0, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1L]
	cfmval32	mvax1, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1M]
	cfmvam32	mvax1, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1H]
	cfmvah32	mvax1, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2L]
	cfmval32	mvax2, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2M]
	cfmvam32	mvax2, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2H]
	cfmvah32	mvax2, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3L]
	cfmval32	mvax3, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3M]
	cfmvam32	mvax3, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3H]
	cfmvah32	mvax3, mvfx0

	cfldr64		mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
	cfldr64		mvdx1, [r0, #CRUNCH_MVDX1]
	cfldr64		mvdx2, [r0, #CRUNCH_MVDX2]
	cfldr64		mvdx3, [r0, #CRUNCH_MVDX3]
	cfldr64		mvdx4, [r0, #CRUNCH_MVDX4]
	cfldr64		mvdx5, [r0, #CRUNCH_MVDX5]
	cfldr64		mvdx6, [r0, #CRUNCH_MVDX6]
	cfldr64		mvdx7, [r0, #CRUNCH_MVDX7]
	cfldr64		mvdx8, [r0, #CRUNCH_MVDX8]
	cfldr64		mvdx9, [r0, #CRUNCH_MVDX9]
	cfldr64		mvdx10, [r0, #CRUNCH_MVDX10]
	cfldr64		mvdx11, [r0, #CRUNCH_MVDX11]
	cfldr64		mvdx12, [r0, #CRUNCH_MVDX12]
	cfldr64		mvdx13, [r0, #CRUNCH_MVDX13]
	cfldr64		mvdx14, [r0, #CRUNCH_MVDX14]
	cfldr64		mvdx15, [r0, #CRUNCH_MVDX15]

1:
#ifdef CONFIG_PREEMPT_COUNT
	get_thread_info r10
#endif
2:	dec_preempt_count r10, r3
	ret	lr

/*
 * Back up crunch regs to save area and disable access to them
 * (mainly for gdb or sleep mode usage)
 *
 * r0 = struct thread_info pointer of target task or NULL for any
 */
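/*
 * From C this is used roughly as
 *	void crunch_task_disable(struct thread_info *thread);
 * (illustrative prototype only; the extern declaration elsewhere in the
 * tree is authoritative)
 */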
ENTRY(crunch_task_disable)
	stmfd	sp!, {r4, r5, lr}

	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r1, [r3]			@ get current crunch owner
	teq	r1, #0				@ any current owner?
	beq	1f				@ no: quit
	teq	r0, #0				@ any owner?
	teqne	r1, r2				@ or specified one?
	bne	1f				@ no: quit
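	@ fall through: r0 was NULL (flush whoever owns crunch) or the
	@ current owner is the task we were asked to flush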

	ldr	r5, [r4, #0x80]			@ enable access to crunch
	mov	r2, #0xaa
	str	r2, [r4, #0xc0]
	orr	r5, r5, #0x00800000
	str	r5, [r4, #0x80]

	mov	r0, #0				@ nothing to load
	str	r0, [r3]			@ no more current owner
	ldr	r2, [r4, #0x80]			@ flush out enable (@@@)
	mov	r2, r2
	bl	crunch_save

	mov	r2, #0xaa			@ disable access to crunch
	str	r2, [r4, #0xc0]
	bic	r5, r5, #0x00800000
	str	r5, [r4, #0x80]
	ldr	r5, [r4, #0x80]			@ flush out enable (@@@)
	mov	r5, r5

1:	msr	cpsr_c, ip			@ restore interrupt mode
	ldmfd	sp!, {r4, r5, pc}

/*
 * Copy crunch state to given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to store crunch state
 *
 * this is called mainly in the creation of signal stack frames
 */
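/*
 * From C this is used roughly as
 *	void crunch_task_copy(struct thread_info *thread, void *dest);
 * (illustrative prototype only; "dest" names the r1 argument above)
 */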
ENTRY(crunch_task_copy)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r3, [r3]			@ get current crunch owner
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ current crunch values are in the task save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r1
	mov	r1, r2
	mov	r2, #CRUNCH_SIZE
	b	memcpy

1:	@ this task owns crunch regs -- grab a copy from there
	mov	r0, #0				@ nothing to load
	mov	r3, lr				@ preserve return address
	bl	crunch_save
	msr	cpsr_c, ip			@ restore interrupt mode
	ret	r3

/*
 * Restore crunch state from given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to get crunch state from
 *
 * this is used to restore crunch state when unwinding a signal stack frame
 */
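/*
 * From C this is used roughly as
 *	void crunch_task_restore(struct thread_info *thread, void *src);
 * (illustrative prototype only; "src" names the r1 argument above)
 */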
ENTRY(crunch_task_restore)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r3, [r3]			@ get current crunch owner
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ this task doesn't own crunch regs -- use its save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r2
	mov	r2, #CRUNCH_SIZE
	b	memcpy

1:	@ this task owns crunch regs -- load them directly
	mov	r0, r1
	mov	r1, #0				@ nothing to save
	mov	r3, lr				@ preserve return address
	bl	crunch_load
	msr	cpsr_c, ip			@ restore interrupt mode
	ret	r3