/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table
#endif

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
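	/*
	 * On entry from the scv vector: r0 holds the syscall number (or -1
	 * for the SIGILL variant), r3-r8 the arguments, r9 the saved user
	 * r13, r11 the NIP, r12 the MSR, and r13 the PACA.
	 */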
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 *
	 * scv enters with MSR[EE]=1, so don't set PACA_IRQ_HARD_DIS. The
	 * entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED.
	 */

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/*
	 * Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done.
	 */
	ld	r0,GPR0(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
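	/*
	 * mtmsrd with L=1 alters only MSR[EE] and MSR[RI]. Clear both:
	 * EE so no interrupt can clobber SRR0/SRR1 before the RFI_TO_USER
	 * below, RI to flag that this window is not recoverable.
	 */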
	li	r3,0
	mtmsrd	r3,1
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	ld	r0,GPR0(r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_10GPRS(2, r1)
	REST_2GPRS(12, r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
.endm

system_call_vectored common 0x3000
/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0


/*
 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
 */
	.globl system_call_vectored_emulate
system_call_vectored_emulate:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	b	system_call_vectored_common
#endif

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
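	/*
	 * On entry from the 0xc00 vector: r0 holds the syscall number,
	 * r3-r8 the arguments, r9 the saved user r13, r11 the NIP (from
	 * SRR0), r12 the MSR (from SRR1), and r13 the PACA.
	 */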
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ld	r2,PACATOC(r13)
	mfcr	r12
	li	r11,0
	/* Can we avoid saving r3-r8 in common case? */
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	addi	r10,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */

	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs
	 * enabled and nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	li	r12,PACA_IRQ_HARD_DIS
	stb	r11,PACAIRQSOFTMASK(r13)
	stb	r12,PACAIRQHAPPENED(r13)

	/* Calling convention has r9 = orig r0, r10 = regs */
	mr	r9,r0
	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	ld	r6,_LINK(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
	mtlr	r6

	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	li	r0,0
	li	r4,0
	li	r5,0
	li	r6,0
	li	r7,0
	li	r8,0
	li	r9,0
	li	r10,0
	li	r11,0
	li	r12,0
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r13,GPR13(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	ld	r0,GPR0(r1)
	REST_8GPRS(4, r1)
	ld	r12,GPR12(r1)
	b	.Lsyscall_restore_regs_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
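	/*
	 * copy_thread() set up r14 with the thread function and r15 with
	 * its argument; under the ELFv2 ABI the callee also expects its
	 * own entry address in r12 in order to derive the TOC pointer.
	 */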
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3E
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);
#endif

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches1; \
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches2; \
1:	nop;			\
	patch_site 1b, patch__call_flush_branch_caches3

.macro nops number
	.rept \number
	nop
	.endr
.endm

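/*
 * Flush the branch predictor link stack and the count cache. The nop
 * patch sites in FLUSH_COUNT_CACHE above are patched to branch here
 * when the count-cache / link-stack flush mitigations are enabled.
 */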
.balign 32
.global flush_branch_caches
flush_branch_caches:
	/* Save LR into r9 */
	mflr	r9

	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

	li	r9,0x7fff
	mtctr	r9

	PPC_BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	PPC_BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/*
 * Cancel all explicit user streams as they will have no use after the
 * context switch, and stop the HW from creating streams itself.
 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
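	/*
	 * Hash MMU only: bolt an SLB entry for the new stack. Radix has
	 * no SLB and branched past this above.
	 */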
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/*
	 * Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/*
	 * No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
	/*
	 * Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	 * because we don't need to leave the 288-byte ABI gap at the
	 * top of the kernel stack.
	 */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_NVGPRS(r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

#ifdef CONFIG_PPC_BOOK3S
	/*
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return
fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
	bne	.Lfast_user_interrupt_return
	kuap_restore_amr r3, r4
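	/*
	 * If the interrupted context had MSR[RI] clear, SRR0/SRR1 may
	 * already have been overwritten, so the interrupt cannot be
	 * recovered from.
	 */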
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */

	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return
interrupt_return:
_ASM_NOKPROBE_SYMBOL(interrupt_return)
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

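	/*
	 * Clear any reservation the interrupted context may hold: a plain
	 * stdcx. is enough unless the CPU checks the reservation address,
	 * in which case a ldarx to the kernel stack replaces it.
	 */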
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

	.balign IFETCH_ALIGN_BYTES
.Lkernel_interrupt_return:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpdi	cr1,r3,0
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_4GPRS(2, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/*
	 * Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_NVGPRS(r1)			/* Save the non-volatiles */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/*
	 * Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/*
	 * It is never acceptable to get here with interrupts enabled;
	 * check it with the asm equivalent of WARN_ON.
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
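	/* Rotate MSR[EE] up to the top bit, mask it off, then rotate back */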
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/*
	 * Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

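	/*
	 * Build two MSR values: r0 is the current MSR with EE/SE/BE/RI
	 * cleared, used below to hard-disable before the rfid; r6
	 * additionally drops SF, IR/DR, FP/FE and LE, giving the 32-bit,
	 * real-mode, big-endian MSR that RTAS runs with.
	 */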
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or	r6,r6,r0
	sync
	mtmsrd	r6

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

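	/*
	 * Pick up the virtual address of rtas_restore_regs from the
	 * literal at 1: below (PC-relative, since we are running in real
	 * mode), then rfid there with relocation re-enabled.
	 */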
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_NVGPRS(r1)			/* Restore the non-volatiles */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/*
	 * Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_NVGPRS(r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/*
	 * Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_NVGPRS(r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,SWITCH_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr
