/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1
 * fit into one page, so that no TLB miss can occur between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
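	/* r0 = r1 with the low THREAD_SHIFT bits cleared, i.e. the base of the current stack */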
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
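	/* cr0.eq is now set iff we came from kernel mode (MSR_PR clear) */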
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	tovirt_vmstack r12, r12
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r2, r12, -THREAD
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
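	/* each CPU has an 8-byte slot in global_dbcr0, so scale the CPU number by 8 */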
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	kuap_save_and_lock r11, r12, r9, r2, r6
	addi	r2, r12, -THREAD
#ifndef CONFIG_VMAP_STACK
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
#endif
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	tovirt_novmstack r2, r2 	/* set r2 to current */
	tovirt_vmstack r9, r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * When tracing IRQ state (lockdep) we enable the MMU before we call
	 * the IRQ tracing functions as they might access vmalloc space or
	 * perform IOs for console output.
	 *
	 * To speed up the syscall path where interrupts stay on, let's check
	 * first if we are changing the MSR value at all.
	 */
	tophys_novmstack r12, r1
	lwz	r12,_MSR(r12)
	andi.	r12,r12,MSR_EE
	bne	1f

	/* MSR isn't changing, just transition directly */
#endif
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	RFI				/* jump to handler, enable MMU */

#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	kuap_restore r11, r2, r3, r4, r5
	lwz	r2, GPR2(r11)
	b	fast_exception_return
#endif
_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)

#ifdef CONFIG_TRACE_IRQFLAGS
1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
	 * keep interrupts disabled at this point, otherwise we risk
	 * taking an interrupt before we tell lockdep they are enabled.
	 */
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r0
	RFI

reenable_mmu:
	/*
	 * We save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively,
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point.
	 */

	stwu	r1,-32(r1)
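	/* open a small stack frame to keep r9, r11 and the fault args (r3-r5) live across the C call below */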
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)

	/* If we are disabling interrupts (normal case), simply log it with
	 * lockdep
	 */
1:	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifndef CONFIG_VMAP_STACK
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
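	/* switch to the top of init_thread_union's stack, leaving room for one stack frame */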
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	RFI
_ASM_NOKPROBE_SYMBOL(stack_ovf)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 */
0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	REST_GPR(0, r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)
	b	DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

	.globl	transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Make sure interrupts are enabled */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	/* If we came in with interrupts disabled, WARN and mark them
	 * enabled for lockdep now */
0:	tweqi	r12, 0
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
	lwz	r11,TI_FLAGS(r2)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
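	/* scale the syscall number by 4: sys_call_table entries are 32-bit pointers */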
	bge-	66f

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) from being speculatively executed until the
	 * test against NR_syscalls and the branch to 66f above have
	 * committed.
	 */

	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	stw	r3,GPR3(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      rseq_syscall
	lwz	r3,GPR3(r1)
#endif
	mr	r6,r3
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	mtmsr	r10
	lwz	r9,TI_FLAGS(r2)
	li	r8,-MAX_ERRNO
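	/* return values in [-MAX_ERRNO, -1] are errors: they get negated and CR0[SO] set below */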
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't normally happen.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl      trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock r5, r7
#endif
	kuap_check r2, r4
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
syscall_exit_finish:
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
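	/* r14/r15 hold the thread function and its argument, as set up by copy_thread() */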
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r2,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
	stwcx.	r8,0,r12
	bne-	3b

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	mtmsr	r10

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
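	/* the low bit of _TRAP set means the NVGPRs have not been saved yet */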
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

	/*
	 * A system call was made from within the kernel. We get here with
	 * SRR1 in r9. Mark the exception as recoverable once we have
	 * retrieved SRR0, trap a warning and return ENOSYS with CR[SO] set.
	 */
	.globl	ret_from_kernel_syscall
ret_from_kernel_syscall:
	mfspr	r9, SPRN_SRR0
	mfspr	r10, SPRN_SRR1
#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
	mtmsr	r11
#endif

0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING

	li	r3, ENOSYS
	crset	so
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0, r9
	mtspr	SPRN_SRR1, r10
	RFI
_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_clone3
ppc_clone3:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone3

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S_32
	andis.  r0,r5,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault
#endif
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
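	/* clear the low bit of _TRAP to record that the full register set is saved */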
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

#ifdef CONFIG_PPC_BOOK3S_32
	/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	bl      do_break
	b	ret_from_except_full
#endif

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	mtmsr	r11
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check r2, r0
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	RFI
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	unrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	mtmsr	r10		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock	r10, r11
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	lwz	r8,TI_FLAGS(r2)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r2,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPTION
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r2)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore_kuap
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore_kuap
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore_kuap	/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
	bl	preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPTION */
restore_kuap:
	kuap_restore r1, r2, r9, r10, r0

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know that IRQs are temporarily turned off in this
	 * assembly code while peeking at TI_FLAGS() and such. However we need
	 * to inform it if the exception turned interrupts off, and we are
	 * about to turn them back on.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	bl	trace_hardirqs_on
	addi	r1, r1, 32
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
	mtmsr	r10		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	RFI
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	rfi
	b	.			/* prevent prefetch past rfi */
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

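/*
 * global_dbcr0 holds one 8-byte slot per CPU: the saved DBCR0 value at
 * offset 0 and a use count at offset 4, maintained by load_dbcr0 above
 * and by the entry code in transfer_to_handler.
 */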
	.section .bss
	.align	4
	.global global_dbcr0
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: we also don't tell lockdep that we are disabling
	 * interrupts again here. These disable/enable cycles used to
	 * peek at TI_FLAGS aren't advertised.
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	mtmsr	r10		/* disable interrupts */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	5f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
5:	mfspr	r2,SPRN_SPRG_THREAD
	addi	r2,r2,-THREAD
	tovirt(r2,r2)			/* set back r2 to current */
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	/* shouldn't return */
	b	4b
_ASM_NOKPROBE_SYMBOL(nonrecoverable)

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys_novmstack r7, r1
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r7, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys_novmstack r9, r1
#ifdef CONFIG_VMAP_STACK
	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
	mtmsr	r0
	isync
#endif
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys_novmstack r7, r2
	stw	r0, THREAD + RTAS_SP(r7)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
