/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PKEYS_H
#define _ASM_X86_PKEYS_H

#define ARCH_DEFAULT_PKEY	0

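/*
 * With OSPKE enabled the hardware provides 16 protection keys (PKRU
 * carries one access-disable/write-disable bit pair per key); without
 * it, only the default key 0 exists.
 */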
#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)

extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
		unsigned long init_val);

static inline bool arch_pkeys_enabled(void)
{
	return boot_cpu_has(X86_FEATURE_OSPKE);
}

/*
 * Try to dedicate one of the protection keys to be used as an
 * execute-only protection key.
 */
extern int __execute_only_pkey(struct mm_struct *mm);
static inline int execute_only_pkey(struct mm_struct *mm)
{
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return ARCH_DEFAULT_PKEY;

	return __execute_only_pkey(mm);
}

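/*
 * Illustrative sketch only: a mapping that is PROT_EXEC and nothing else
 * can be given the dedicated execute-only pkey; setting that key's
 * access-disable bit in PKRU blocks data reads while instruction fetches
 * still succeed:
 *
 *	pkey = execute_only_pkey(mm);
 *	if (pkey > 0)
 *		tag the VMA with 'pkey' instead of the default key 0
 *
 * The override hook below lets the arch substitute such a pkey choice
 * (e.g. the execute-only key) for the one an mprotect()/mmap() call
 * would otherwise assign:
 */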
extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
		int prot, int pkey);
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
		int prot, int pkey)
{
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return 0;

	return __arch_override_mprotect_pkey(vma, prot, pkey);
}

extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
		unsigned long init_val);

#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3)

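/*
 * Per-mm allocation bitmap: bit N set means pkey N is in use.  Key 0 is
 * marked allocated implicitly when the mm is created, and the
 * execute-only key is added once it has been dedicated.
 */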
#define mm_pkey_allocation_map(mm)	(mm->context.pkey_allocation_map)
#define mm_set_pkey_allocated(mm, pkey) do {		\
	mm_pkey_allocation_map(mm) |= (1U << pkey);	\
} while (0)
#define mm_set_pkey_free(mm, pkey) do {			\
	mm_pkey_allocation_map(mm) &= ~(1U << pkey);	\
} while (0)

static inline
bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
{
	/*
	 * "Allocated" pkeys are those that have been returned
	 * from pkey_alloc() or pkey 0 which is allocated
	 * implicitly when the mm is created.
	 */
	if (pkey < 0)
		return false;
	if (pkey >= arch_max_pkey())
		return false;
	/*
	 * The exec-only pkey is set in the allocation map, but
	 * is not available to any of the user interfaces like
	 * mprotect_pkey().
	 */
	if (pkey == mm->context.execute_only_pkey)
		return false;

	return mm_pkey_allocation_map(mm) & (1U << pkey);
}

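/*
 * A consequence of the exec-only check above (illustrative): user-visible
 * operations on the dedicated execute-only key are rejected, e.g.
 *
 *	mm_pkey_free(mm, mm->context.execute_only_pkey) returns -EINVAL
 */
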
/*
 * Returns a positive, 4-bit key on success, or -1 on failure.
 */
static inline
int mm_pkey_alloc(struct mm_struct *mm)
{
	/*
	 * Note: this is the one and only place we make sure
	 * that the pkey is valid as far as the hardware is
	 * concerned.  The rest of the kernel trusts that
	 * only good, valid pkeys come out of here.
	 */
	u16 all_pkeys_mask = ((1U << arch_max_pkey()) - 1);
	int ret;

	/*
	 * Are we out of pkeys?  We must handle this specially
	 * because ffz() behavior is undefined if there are no
	 * zeros.
	 */
	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
		return -1;

	ret = ffz(mm_pkey_allocation_map(mm));

	mm_set_pkey_allocated(mm, ret);

	return ret;
}
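
/*
 * Usage sketch, loosely modelled on the pkey_alloc() syscall path (the
 * real caller differs in detail and must hold the mm's mmap lock for
 * write):
 *
 *	pkey = mm_pkey_alloc(mm);
 *	if (pkey == -1)
 *		return -ENOSPC;
 *	if (arch_set_user_pkey_access(current, pkey, init_val))
 *		mm_pkey_free(mm, pkey);
 */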

static inline
int mm_pkey_free(struct mm_struct *mm, int pkey)
{
	if (!mm_pkey_is_allocated(mm, pkey))
		return -EINVAL;

	mm_set_pkey_free(mm, pkey);

	return 0;
}

extern void copy_init_pkru_to_fpregs(void);

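/*
 * Extract the 4-bit protection key encoded in the VM_PKEY_BIT0..3
 * vm_flags bits of a VMA.
 */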
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}

#endif /* _ASM_X86_PKEYS_H */