/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
#else
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
#endif
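
/*
 * Worked example (illustrative only, assuming asid_bits == 16):
 *
 *   ASID_FIRST_VERSION = 1 << 16 = 0x10000
 *   ASID_MASK          = ~0xffff
 *
 * Without CONFIG_UNMAP_KERNEL_AT_EL0 all 65536 hardware ASIDs are usable
 * as allocator indices. With KPTI enabled, NUM_USER_ASIDS is halved to
 * 32768 and indices map to even hardware ASIDs, e.g. idx2asid(3) == 6 and
 * asid2idx(6) == 3, so each mm effectively owns the pair (asid, asid | 1),
 * one ASID for each side of the KPTI user/kernel split.
 */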

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

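	/*
	 * ID_AA64MMFR0_EL1.ASIDBits encodes the supported ASID width:
	 * 0b0000 means 8-bit ASIDs and 0b0010 means 16-bit ASIDs. All other
	 * values are reserved, hence the conservative 8-bit fallback below.
	 */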
	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}
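
/*
 * Illustrative rollover sequence (assuming asid_bits == 16): when
 * new_context() runs out of free ASIDs it bumps asid_generation, e.g.
 * from 0x10000 to 0x20000, and calls flush_context(). The loop above
 * latches each CPU's currently active ASID into reserved_asids so the
 * task running there keeps its hardware ASID across the rollover, while
 * every other ASID becomes free again. Each CPU then performs
 * local_flush_tlb_all() in check_and_switch_context() before it installs
 * any ASID from the new generation.
 */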

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.  We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
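
/*
 * Illustrative layout of mm->context.id (assuming asid_bits == 16): the
 * value returned above is "generation | hardware ASID", so e.g. 0x00030004
 * means the mm was last assigned an ASID while asid_generation was 0x30000
 * and runs with hardware ASID 4. Because the generation starts at
 * ASID_FIRST_VERSION and is only ever incremented, a valid context.id is
 * never 0, which is why "asid != 0" above means "this mm has held an ASID
 * before".
 */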

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
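
/*
 * Note on the fastpath check above (descriptive only): the expression
 * "(asid ^ asid_generation) >> asid_bits" shifts out the hardware-ASID
 * bits, so it is zero exactly when the generation held in the upper bits
 * of mm->context.id matches the current global generation. On most
 * context switches this holds and the relaxed cmpxchg publishes the ASID
 * without taking cpu_asid_lock; only after a rollover (stale generation,
 * or an active_asids entry zeroed by flush_context()) do we fall back to
 * the locked slow path, possibly allocate a new ASID, and flush the local
 * TLB if an invalidation is still pending for this CPU.
 */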

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456,
			CONFIG_CAVIUM_ERRATUM_27456));
}

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
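	/*
	 * For example (illustrative): with 8-bit ASIDs and KPTI enabled,
	 * NUM_USER_ASIDS is 128, so a machine with 127 or more possible
	 * CPUs would trip the warning above.
	 */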
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);