/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * PowerPC Memory Protection Keys management
 *
 * Copyright 2017, Ram Pai, IBM Corporation.
 */

#ifndef _ASM_POWERPC_KEYS_H
#define _ASM_POWERPC_KEYS_H

#include <linux/jump_label.h>
#include <asm/firmware.h>

DECLARE_STATIC_KEY_TRUE(pkey_disabled);
extern int pkeys_total; /* total pkeys as per device tree */
extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */
extern u32 reserved_allocation_mask; /* bits set for reserved keys */

#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
			    VM_PKEY_BIT3 | VM_PKEY_BIT4)

/* Override any generic PKEY permission defines */
#define PKEY_DISABLE_EXECUTE   0x4
#define PKEY_ACCESS_MASK       (PKEY_DISABLE_ACCESS | \
				PKEY_DISABLE_WRITE  | \
				PKEY_DISABLE_EXECUTE)
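/*
 * PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE come from the generic uapi
 * headers; PKEY_DISABLE_EXECUTE is a powerpc addition, since the hardware
 * (via the IAMR) can also deny instruction fetch per key.
 */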

static inline u64 pkey_to_vmflag_bits(u16 pkey)
{
	return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
}
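/*
 * In pkey_to_vmflag_bits() the five VM_PKEY_BIT* flags occupy consecutive
 * vm_flags bits starting at VM_PKEY_SHIFT, so bit i of the key maps to
 * VM_PKEY_BITi; e.g. pkey 5 (0b00101) becomes VM_PKEY_BIT0 | VM_PKEY_BIT2.
 */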

static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
{
	if (static_branch_likely(&pkey_disabled))
		return 0x0UL;

	return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT4 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT3 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT1 : 0x0UL) |
		((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT0 : 0x0UL));
}
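/*
 * Note the reversed numbering in vmflag_to_pte_pkey_bits(): VM_PKEY_BIT0
 * (the key's least significant bit) maps to H_PTE_PKEY_BIT4 and
 * VM_PKEY_BIT4 to H_PTE_PKEY_BIT0. The H_PTE_PKEY_BIT* and HPTE_R_KEY_BIT*
 * names are numbered MSB-first (IBM convention), while the pkey value and
 * the VM_PKEY_BIT* flags are LSB-first.
 */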

static inline int vma_pkey(struct vm_area_struct *vma)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;
	return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
}

#define arch_max_pkey() pkeys_total

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return (((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL));
}

static inline u16 pte_to_pkey_bits(u64 pteflags)
{
	return (((pteflags & H_PTE_PKEY_BIT0) ? 0x10 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT1) ? 0x8 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT3) ? 0x2 : 0x0UL) |
		((pteflags & H_PTE_PKEY_BIT4) ? 0x1 : 0x0UL));
}
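/*
 * pte_to_pkey_bits() is the inverse mapping: it reassembles the 5-bit key
 * number from the software PTE key bits, e.g. a PTE with H_PTE_PKEY_BIT2
 * and H_PTE_PKEY_BIT4 set decodes to key 0x4 | 0x1 = 5.
 */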

#define pkey_alloc_mask(pkey) (0x1 << pkey)

#define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)

#define __mm_pkey_allocated(mm, pkey) {	\
	mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
}

#define __mm_pkey_free(mm, pkey) {	\
	mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey);	\
}

#define __mm_pkey_is_allocated(mm, pkey)	\
	(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))

#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
				       pkey_alloc_mask(pkey))

static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
	if (pkey < 0 || pkey >= arch_max_pkey())
		return false;

	/* Reserved keys are never allocated. */
	if (__mm_pkey_is_reserved(pkey))
		return false;

	return __mm_pkey_is_allocated(mm, pkey);
}

/*
 * Returns a positive, 5-bit key on success, or -1 on failure.
 * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
 * mm_pkey_free().
 */
static inline int mm_pkey_alloc(struct mm_struct *mm)
{
	/*
	 * Note: this is the one and only place we make sure that the pkey is
	 * valid as far as the hardware is concerned. The rest of the kernel
	 * trusts that only good, valid pkeys come out of here.
	 */
	u32 all_pkeys_mask = (u32)(~(0x0));
	int ret;

	if (static_branch_likely(&pkey_disabled))
		return -1;

	/*
	 * Are we out of pkeys? We must handle this specially because ffz()
	 * behavior is undefined if there are no zeros.
	 */
	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
		return -1;

	ret = ffz((u32)mm_pkey_allocation_map(mm));
	__mm_pkey_allocated(mm, ret);

	return ret;
}
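/*
 * Typical caller (a sketch modelled on the generic sys_pkey_alloc() path
 * in mm/mprotect.c; the real code also frees the key again if setting the
 * initial access rights fails):
 *
 *	down_write(&mm->mmap_sem);
 *	pkey = mm_pkey_alloc(mm);
 *	if (pkey == -1)
 *		ret = -ENOSPC;
 *	else
 *		ret = arch_set_user_pkey_access(current, pkey, init_val);
 *	up_write(&mm->mmap_sem);
 */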

static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	if (!mm_pkey_is_allocated(mm, pkey))
		return -EINVAL;

	__mm_pkey_free(mm, pkey);

	return 0;
}

/*
 * Try to dedicate one of the protection keys to be used as an
 * execute-only protection key.
 */
extern int __execute_only_pkey(struct mm_struct *mm);
static inline int execute_only_pkey(struct mm_struct *mm)
{
	if (static_branch_likely(&pkey_disabled))
		return -1;

	return __execute_only_pkey(mm);
}
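/*
 * __execute_only_pkey() (arch/powerpc/mm/pkeys.c) dedicates a key whose
 * AMR bits deny read and write while its IAMR bit still permits execute,
 * so a PROT_EXEC-only mapping can be run but not read as data.
 */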

extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
					 int prot, int pkey);
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
					      int prot, int pkey)
{
	if (static_branch_likely(&pkey_disabled))
		return 0;

	/*
	 * Is this an mprotect_pkey() call? If so, never override the value that
	 * came from the user.
	 */
	if (pkey != -1)
		return pkey;

	return __arch_override_mprotect_pkey(vma, prot, pkey);
}
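/*
 * The override only applies when no explicit key was passed in (pkey == -1);
 * __arch_override_mprotect_pkey() may then pick the execute-only key for a
 * PROT_EXEC-only request, or fall back to the VMA's current key.
 */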

extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
				       unsigned long init_val);
static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
					    unsigned long init_val)
{
	if (static_branch_likely(&pkey_disabled))
		return -EINVAL;

	/*
	 * userspace should not change pkey-0 permissions.
	 * pkey-0 is associated with every page in the kernel.
	 * If userspace denies any permission on pkey-0, the
	 * kernel cannot operate.
	 */
	if (pkey == 0)
		return init_val ? -EINVAL : 0;

	return __arch_set_user_pkey_access(tsk, pkey, init_val);
}

static inline bool arch_pkeys_enabled(void)
{
	return !static_branch_likely(&pkey_disabled);
}

extern void pkey_mm_init(struct mm_struct *mm);
extern bool arch_supports_pkeys(int cap);
extern unsigned int arch_usable_pkeys(void);
extern void thread_pkey_regs_save(struct thread_struct *thread);
extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
				     struct thread_struct *old_thread);
extern void thread_pkey_regs_init(struct thread_struct *thread);
#endif /*_ASM_POWERPC_KEYS_H */