// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sections.h>

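/*
 * struct alt_instr stores self-relative offsets: adding the value of the
 * orig_offset/alt_offset field to that field's own address yields the
 * absolute address of the original and replacement instruction sequences.
 */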
#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

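/*
 * Set once by the boot CPU when it has finished patching; secondary CPUs
 * spin on this flag before executing any potentially-patched code.
 */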
static int all_alternatives_applied;

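/* One bit per cpucap, set once that capability's alternatives are patched. */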
static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

struct alt_region {
	struct alt_instr	*begin;
	struct alt_instr	*end;
};

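/*
 * Report whether the alternatives for the given capability have already
 * been applied.
 */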
bool alternative_is_applied(u16 cpufeature)
{
	if (WARN_ON(cpufeature >= ARM64_NCAPS))
		return false;

	return test_bit(cpufeature, applied_alternatives);
}

/*
 * Check whether a branch target PC lands outside the replacement sequence;
 * if it does, the branch offset must be recomputed for the instruction's
 * new location.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}

#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

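/*
 * Fetch one instruction from the replacement sequence and, if it is
 * PC-relative (an immediate branch or an adrp), rewrite its offset so
 * that it remains correct when executed from the original location.
 */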
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC relative
		 * literal addresses
		 */
		BUG();
	}

	return insn;
}

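/*
 * Default patching callback: copy nr_inst instructions from the
 * replacement sequence over the original, fixing up any PC-relative
 * encodings along the way.
 */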
static void patch_alternative(struct alt_instr *alt,
			      __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}

/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_DMINLINE_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}

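/*
 * Walk an alt_region and apply every alternative whose capability bit is
 * set in feature_mask. In-kernel text is patched through the writable
 * linear-map alias and followed by explicit cache maintenance; module
 * text is written directly and flushed by the module loader.
 */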
static void __apply_alternatives(void *alt_region, bool is_module,
				 unsigned long *feature_mask)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;

		if (!test_bit(alt->cpufeature, feature_mask))
			continue;

		/* Use ARM64_CB_PATCH as an unconditional patch */
		if (alt->cpufeature < ARM64_CB_PATCH &&
		    !cpus_have_cap(alt->cpufeature))
			continue;

		if (alt->cpufeature == ARM64_CB_PATCH)
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
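		/*
		 * Kernel text is mapped read-only, so write through the
		 * linear-map alias; module text is still writable at this
		 * point and can be patched in place.
		 */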
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

		if (alt->cpufeature < ARM64_CB_PATCH)
			alt_cb = patch_alternative;
		else
			alt_cb = ALT_REPL_PTR(alt);

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		__flush_icache_all();
		isb();

		/* Ignore ARM64_CB bit from feature mask */
		bitmap_or(applied_alternatives, applied_alternatives,
			  feature_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   cpu_hwcaps, ARM64_NCAPS);
	}
}

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!READ_ONCE(all_alternatives_applied))
			cpu_relax();
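		/* Discard any instructions fetched before patching finished */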
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);

		bitmap_complement(remaining_capabilities, boot_capabilities,
				  ARM64_NPATCHABLE);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&region, false, remaining_capabilities);
		/* Barriers provided by the cache flushing */
		WRITE_ONCE(all_alternatives_applied, 1);
	}

	return 0;
}

void __init apply_alternatives_all(void)
{
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	__apply_alternatives(&region, false, &boot_capabilities[0]);
}

#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);

	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);

	__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif