1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
3 #define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
4 
5 #include <linux/const.h>
6 
7 #define AMR_KUAP_BLOCK_READ	UL(0x4000000000000000)
8 #define AMR_KUAP_BLOCK_WRITE	UL(0x8000000000000000)
9 #define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
10 #define AMR_KUAP_SHIFT		62
11 
12 #ifdef __ASSEMBLY__
13 
/*
 * Reload the AMR from the value previously saved in pt_regs->kuap on the
 * stack, using \gpr as a scratch register.  Patched to nops at runtime
 * unless MMU_FTR_RADIX_KUAP is set.  No isync here; NOTE(review):
 * presumably the subsequent context-synchronising interrupt return (rfid)
 * provides the required CSI — confirm at call sites.
 */
.macro kuap_restore_amr	gpr
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	ld	\gpr, STACK_REGS_KUAP(r1)
	mtspr	SPRN_AMR, \gpr
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
22 
/*
 * Debug-only (CONFIG_PPC_KUAP_DEBUG) sanity check that the AMR currently
 * holds the fully-blocked value.  \gpr1 and \gpr2 are scratch registers.
 * AMR_KUAP_BLOCKED is rebuilt with li + sldi because the 64-bit constant
 * does not fit in a single immediate.  tdne traps if the live AMR differs,
 * and EMIT_BUG_ENTRY turns that trap into a one-shot WARN.  Patched to
 * nops at runtime unless MMU_FTR_RADIX_KUAP is set.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
34 
/*
 * Save the current AMR into pt_regs->kuap on the stack, then lock the AMR
 * to the fully-blocked value unless it is already blocked.  \gpr1 and
 * \gpr2 are scratch; \use_cr is the CR field used for the already-blocked
 * comparison.  If \msr_pr_cr is non-blank it names a CR field tested on
 * entry and the whole sequence is skipped when it compares not-equal —
 * NOTE(review): presumably set by the caller from MSR[PR] to skip the
 * save/lock for entries from userspace; confirm at call sites.
 * Patched to nops at runtime unless MMU_FTR_RADIX_KUAP is set.
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	bne	\msr_pr_cr, 99f
	.endif
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_KUAP(r1)
	/* Build AMR_KUAP_BLOCKED in \gpr2 (li + sldi: the 64-bit constant
	 * does not fit in one immediate), skip the mtspr if already set. */
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 99f
	// We don't isync here because we very recently entered via rfid
	mtspr	SPRN_AMR, \gpr2
	isync
99:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
54 
55 #else /* !__ASSEMBLY__ */
56 
57 #ifdef CONFIG_PPC_KUAP
58 
59 #include <asm/reg.h>
60 
61 /*
62  * We support individually allowing read or write, but we don't support nesting
63  * because that would require an expensive read/modify write of the AMR.
64  */
65 
66 static inline void set_kuap(unsigned long value)
67 {
68 	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
69 		return;
70 
71 	/*
72 	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
73 	 * before and after the move to AMR. See table 6 on page 1134.
74 	 */
75 	isync();
76 	mtspr(SPRN_AMR, value);
77 	isync();
78 }
79 
80 static inline void allow_user_access(void __user *to, const void __user *from,
81 				     unsigned long size)
82 {
83 	// This is written so we can resolve to a single case at build time
84 	if (__builtin_constant_p(to) && to == NULL)
85 		set_kuap(AMR_KUAP_BLOCK_WRITE);
86 	else if (__builtin_constant_p(from) && from == NULL)
87 		set_kuap(AMR_KUAP_BLOCK_READ);
88 	else
89 		set_kuap(0);
90 }
91 
/*
 * Close the user access window: block both reads and writes of userspace
 * via the AMR. @to, @from and @size are accepted for interface symmetry
 * with allow_user_access() but are not used.
 */
static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size)
{
	set_kuap(AMR_KUAP_BLOCKED);
}
97 
98 static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
99 {
100 	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
101 		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
102 		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
103 }
104 #endif /* CONFIG_PPC_KUAP */
105 
106 #endif /* __ASSEMBLY__ */
107 
108 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
109