/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

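/*
 * When CONFIG_PID_IN_CONTEXTIDR is enabled, publish the incoming task's PID
 * in CONTEXTIDR_EL1 on every context switch, so that external debug and
 * trace tools can attribute activity to the running task.
 */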
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

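/*
 * Point TTBR0_EL1 at the given pgd. The switch goes via the reserved page
 * tables so that the new TTBR/ASID pair is never live alongside stale
 * translations. swapper_pg_dir contains no user mappings and must never be
 * installed in TTBR0, hence the BUG_ON().
 */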
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

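/*
 * True if the idmap needs to cover a wider VA range than the default, i.e.
 * idmap_t0sz was lowered below TCR_T0SZ(VA_BITS) at boot.
 */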
static inline bool __cpu_uses_extended_idmap(void)
{
	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
 * Set TCR.T0SZ to the given value. This is only needed (and only takes
 * effect) when the CPU uses an extended idmap; otherwise T0SZ already holds
 * its default value, which is based on VA_BITS.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	tcr = read_sysreg(tcr_el1);
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

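/*
 * Install the idmap page tables in TTBR0_EL1, widening T0SZ first if the
 * idmap needs it. Callers are expected to pair this with
 * cpu_uninstall_idmap() once they are done running out of the idmap.
 */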
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	phys_addr_t pgd_phys = virt_to_phys(pgdp);

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(pgd_phys);
	cpu_uninstall_idmap();
}
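
/*
 * A sketch of typical usage: swapping in a freshly built set of swapper
 * page tables, as e.g. the arm64 KASAN init code does:
 *
 *	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 *
 * The replacement routine runs from the identity map, so no TTBR1_EL1
 * translations are live while the switch happens.
 */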

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while (0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

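/*
 * A new mm starts with ASID 0, which the allocator treats as "no ASID
 * assigned yet"; a real ASID is handed out lazily by
 * check_and_switch_context() the first time the mm is scheduled in.
 */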
#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
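/*
 * With software PAN, the saved TTBR0 value in thread_info is what the entry
 * code reinstalls on return to userspace. Stash the pgd's physical address
 * with the ASID in bits 63:48, or point it at the zero page for kernel
 * threads so that stale user tables are never re-exposed.
 */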
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = __pa_symbol(empty_zero_page);
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

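/*
 * Do the real TTBR0/ASID switch for @next. init_mm is handled purely with
 * the reserved tables, since it owns no user mappings; everything else goes
 * through the ASID allocator.
 */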
static inline void __switch_mm(struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next, cpu);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID may have changed since the last run (following the context
	 * switch of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

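/*
 * activate_mm() piggy-backs on switch_mm(), so a new mm installed by exec
 * takes the same TTBR0/ASID path as an ordinary context switch.
 * deactivate_mm() is a no-op on arm64.
 */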
#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */