1 /*
2  * Copyright (C) 2013 Huawei Ltd.
3  * Author: Jiang Liu <liuj97@gmail.com>
4  *
5  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 #include <linux/bitops.h>
20 #include <linux/bug.h>
21 #include <linux/compiler.h>
22 #include <linux/kernel.h>
23 #include <linux/mm.h>
24 #include <linux/smp.h>
25 #include <linux/spinlock.h>
26 #include <linux/stop_machine.h>
27 #include <linux/types.h>
28 #include <linux/uaccess.h>
29 
30 #include <asm/cacheflush.h>
31 #include <asm/debug-monitors.h>
32 #include <asm/fixmap.h>
33 #include <asm/insn.h>
34 #include <asm/kprobes.h>
35 
/* "sf" bit: selects the 64-bit (set) vs 32-bit (clear) datapath variant */
#define AARCH64_INSN_SF_BIT	BIT(31)
/* "N" bit of the bitfield (BFM/UBFM/SBFM) encodings */
#define AARCH64_INSN_N_BIT	BIT(22)
/* "sh" bit of add/sub (immediate): shift the 12-bit immediate left by 12 */
#define AARCH64_INSN_LSL_12	BIT(22)
39 
40 static int aarch64_insn_encoding_class[] = {
41 	AARCH64_INSN_CLS_UNKNOWN,
42 	AARCH64_INSN_CLS_UNKNOWN,
43 	AARCH64_INSN_CLS_UNKNOWN,
44 	AARCH64_INSN_CLS_UNKNOWN,
45 	AARCH64_INSN_CLS_LDST,
46 	AARCH64_INSN_CLS_DP_REG,
47 	AARCH64_INSN_CLS_LDST,
48 	AARCH64_INSN_CLS_DP_FPSIMD,
49 	AARCH64_INSN_CLS_DP_IMM,
50 	AARCH64_INSN_CLS_DP_IMM,
51 	AARCH64_INSN_CLS_BR_SYS,
52 	AARCH64_INSN_CLS_BR_SYS,
53 	AARCH64_INSN_CLS_LDST,
54 	AARCH64_INSN_CLS_DP_REG,
55 	AARCH64_INSN_CLS_LDST,
56 	AARCH64_INSN_CLS_DP_FPSIMD,
57 };
58 
aarch64_get_insn_class(u32 insn)59 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
60 {
61 	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
62 }
63 
64 /* NOP is an alias of HINT */
aarch64_insn_is_nop(u32 insn)65 bool __kprobes aarch64_insn_is_nop(u32 insn)
66 {
67 	if (!aarch64_insn_is_hint(insn))
68 		return false;
69 
70 	switch (insn & 0xFE0) {
71 	case AARCH64_INSN_HINT_YIELD:
72 	case AARCH64_INSN_HINT_WFE:
73 	case AARCH64_INSN_HINT_WFI:
74 	case AARCH64_INSN_HINT_SEV:
75 	case AARCH64_INSN_HINT_SEVL:
76 		return false;
77 	default:
78 		return true;
79 	}
80 }
81 
aarch64_insn_is_branch_imm(u32 insn)82 bool aarch64_insn_is_branch_imm(u32 insn)
83 {
84 	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
85 		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
86 		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
87 		aarch64_insn_is_bcond(insn));
88 }
89 
/* Serializes text patching; protects the shared FIX_TEXT_POKE0 fixmap slot */
static DEFINE_RAW_SPINLOCK(patch_lock);
91 
/*
 * Return a writable alias of @addr for text patching.
 *
 * Kernel/module text may be mapped read-only, so the patch is routed
 * through a temporary fixmap slot (@fixmap) mapping the backing page.
 * Module addresses live in vmalloc space; core kernel text is in the
 * linear/kernel mapping.  Module addresses without STRICT_MODULE_RWX
 * are returned unchanged and written directly.
 *
 * Caller must hold patch_lock (the fixmap slot is a single shared
 * resource) and undo the mapping afterwards with patch_unmap().
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	/* Map the page, preserving the sub-page offset of @addr */
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}
109 
/* Tear down the temporary patching alias created by patch_map() */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
114 /*
115  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
116  * little-endian.
117  */
aarch64_insn_read(void * addr,u32 * insnp)118 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
119 {
120 	int ret;
121 	__le32 val;
122 
123 	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
124 	if (!ret)
125 		*insnp = le32_to_cpu(val);
126 
127 	return ret;
128 }
129 
/*
 * Write one (already little-endian) instruction to kernel/module text.
 *
 * Takes patch_lock and routes the store through the FIX_TEXT_POKE0
 * writable alias so read-only text mappings can still be patched.
 * Returns 0 on success or the probe_kernel_write() error.  Does NOT
 * flush the I-cache; callers needing synchronization go through
 * aarch64_insn_patch_text_nosync().
 */
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
146 
/* Write @insn to @addr, converting to little-endian memory order first */
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
151 
aarch64_insn_uses_literal(u32 insn)152 bool __kprobes aarch64_insn_uses_literal(u32 insn)
153 {
154 	/* ldr/ldrsw (literal), prfm */
155 
156 	return aarch64_insn_is_ldr_lit(insn) ||
157 		aarch64_insn_is_ldrsw_lit(insn) ||
158 		aarch64_insn_is_adr_adrp(insn) ||
159 		aarch64_insn_is_prfm_lit(insn);
160 }
161 
aarch64_insn_is_branch(u32 insn)162 bool __kprobes aarch64_insn_is_branch(u32 insn)
163 {
164 	/* b, bl, cb*, tb*, b.cond, br, blr */
165 
166 	return aarch64_insn_is_b(insn) ||
167 		aarch64_insn_is_bl(insn) ||
168 		aarch64_insn_is_cbz(insn) ||
169 		aarch64_insn_is_cbnz(insn) ||
170 		aarch64_insn_is_tbz(insn) ||
171 		aarch64_insn_is_tbnz(insn) ||
172 		aarch64_insn_is_ret(insn) ||
173 		aarch64_insn_is_br(insn) ||
174 		aarch64_insn_is_blr(insn) ||
175 		aarch64_insn_is_bcond(insn);
176 }
177 
aarch64_insn_patch_text_nosync(void * addr,u32 insn)178 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
179 {
180 	u32 *tp = addr;
181 	int ret;
182 
183 	/* A64 instructions must be word aligned */
184 	if ((uintptr_t)tp & 0x3)
185 		return -EINVAL;
186 
187 	ret = aarch64_insn_write(tp, insn);
188 	if (ret == 0)
189 		__flush_icache_range((uintptr_t)tp,
190 				     (uintptr_t)tp + AARCH64_INSN_SIZE);
191 
192 	return ret;
193 }
194 
/* Work item handed to aarch64_insn_patch_text_cb() via stop_machine() */
struct aarch64_insn_patch {
	void		**text_addrs;	/* instruction addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* number of entries in the arrays */
	atomic_t	cpu_count;	/* CPU rendezvous counter, see callback */
};
201 
/*
 * stop_machine() callback: the first CPU to arrive performs all the
 * patching while every other CPU spins until it has finished, then
 * executes an ISB to discard any stale instructions it may have
 * prefetched.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * Secondaries wait for the count to exceed the number of
		 * online CPUs, which only happens after the master's extra
		 * increment above, i.e. after all patching is done.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
222 
aarch64_insn_patch_text(void * addrs[],u32 insns[],int cnt)223 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
224 {
225 	struct aarch64_insn_patch patch = {
226 		.text_addrs = addrs,
227 		.new_insns = insns,
228 		.insn_cnt = cnt,
229 		.cpu_count = ATOMIC_INIT(0),
230 	};
231 
232 	if (cnt <= 0)
233 		return -EINVAL;
234 
235 	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
236 				       cpu_online_mask);
237 }
238 
aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,u32 * maskp,int * shiftp)239 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
240 						u32 *maskp, int *shiftp)
241 {
242 	u32 mask;
243 	int shift;
244 
245 	switch (type) {
246 	case AARCH64_INSN_IMM_26:
247 		mask = BIT(26) - 1;
248 		shift = 0;
249 		break;
250 	case AARCH64_INSN_IMM_19:
251 		mask = BIT(19) - 1;
252 		shift = 5;
253 		break;
254 	case AARCH64_INSN_IMM_16:
255 		mask = BIT(16) - 1;
256 		shift = 5;
257 		break;
258 	case AARCH64_INSN_IMM_14:
259 		mask = BIT(14) - 1;
260 		shift = 5;
261 		break;
262 	case AARCH64_INSN_IMM_12:
263 		mask = BIT(12) - 1;
264 		shift = 10;
265 		break;
266 	case AARCH64_INSN_IMM_9:
267 		mask = BIT(9) - 1;
268 		shift = 12;
269 		break;
270 	case AARCH64_INSN_IMM_7:
271 		mask = BIT(7) - 1;
272 		shift = 15;
273 		break;
274 	case AARCH64_INSN_IMM_6:
275 	case AARCH64_INSN_IMM_S:
276 		mask = BIT(6) - 1;
277 		shift = 10;
278 		break;
279 	case AARCH64_INSN_IMM_R:
280 		mask = BIT(6) - 1;
281 		shift = 16;
282 		break;
283 	case AARCH64_INSN_IMM_N:
284 		mask = 1;
285 		shift = 22;
286 		break;
287 	default:
288 		return -EINVAL;
289 	}
290 
291 	*maskp = mask;
292 	*shiftp = shift;
293 
294 	return 0;
295 }
296 
/*
 * ADR/ADRP split their 21-bit immediate into immlo (the low 2 bits, at
 * instruction bits [30:29]) and immhi (the high 19 bits, at [23:5]).
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
303 
/*
 * Extract immediate field @type from @insn and return it as an
 * unsigned value (no sign extension).  Unknown field types log an
 * error and return 0.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		/* Reassemble the split immhi:immlo ADR/ADRP immediate */
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
327 
/*
 * Insert immediate @imm into field @type of @insn and return the
 * result.  @imm is silently truncated to the field width.
 *
 * AARCH64_BREAK_FAULT is propagated unchanged (and returned for unknown
 * field types) so that encoder calls can be chained without checking
 * each intermediate result.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		/* Split the 21-bit ADR/ADRP immediate into immhi:immlo */
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
361 
/*
 * Extract the 5-bit register number of field @type from @insn.
 * Unknown register field types log an error and return 0.
 */
u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
					u32 insn)
{
	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		return insn & GENMASK(4, 0);
	case AARCH64_INSN_REGTYPE_RN:
		return (insn >> 5) & GENMASK(4, 0);
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		return (insn >> 10) & GENMASK(4, 0);
	case AARCH64_INSN_REGTYPE_RM:
		return (insn >> 16) & GENMASK(4, 0);
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}
}
390 
/*
 * Insert register number @reg into field @type of @insn.  Propagates
 * AARCH64_BREAK_FAULT (and returns it for invalid registers or unknown
 * field types) so encoder calls can be chained.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int pos;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		pos = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		pos = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		pos = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		pos = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	/* Clear the 5-bit field, then insert the register number */
	return (insn & ~(GENMASK(4, 0) << pos)) | ((u32)reg << pos);
}
432 
/*
 * Insert the load/store access-size field (bits [31:30], the log2 of
 * the byte count) for @type into @insn.  Unknown sizes return
 * AARCH64_BREAK_FAULT.
 */
static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 sz;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		sz = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		sz = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		sz = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		sz = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return (insn & ~GENMASK(31, 30)) | (sz << 30);
}
461 
/*
 * Validate @pc/@addr and compute the branch displacement.
 *
 * Returns the signed byte offset (addr - pc) on success.  On error
 * (misalignment, or offset outside [-range, range)) it returns @range
 * itself, which can never be a valid result: callers detect failure
 * with "offset >= range".
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/* A64 instructions are always 4-byte aligned */
	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
481 
aarch64_insn_gen_branch_imm(unsigned long pc,unsigned long addr,enum aarch64_insn_branch_type type)482 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
483 					  enum aarch64_insn_branch_type type)
484 {
485 	u32 insn;
486 	long offset;
487 
488 	/*
489 	 * B/BL support [-128M, 128M) offset
490 	 * ARM64 virtual address arrangement guarantees all kernel and module
491 	 * texts are within +/-128M.
492 	 */
493 	offset = branch_imm_common(pc, addr, SZ_128M);
494 	if (offset >= SZ_128M)
495 		return AARCH64_BREAK_FAULT;
496 
497 	switch (type) {
498 	case AARCH64_INSN_BRANCH_LINK:
499 		insn = aarch64_insn_get_bl_value();
500 		break;
501 	case AARCH64_INSN_BRANCH_NOLINK:
502 		insn = aarch64_insn_get_b_value();
503 		break;
504 	default:
505 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
506 		return AARCH64_BREAK_FAULT;
507 	}
508 
509 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
510 					     offset >> 2);
511 }
512 
aarch64_insn_gen_comp_branch_imm(unsigned long pc,unsigned long addr,enum aarch64_insn_register reg,enum aarch64_insn_variant variant,enum aarch64_insn_branch_type type)513 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
514 				     enum aarch64_insn_register reg,
515 				     enum aarch64_insn_variant variant,
516 				     enum aarch64_insn_branch_type type)
517 {
518 	u32 insn;
519 	long offset;
520 
521 	offset = branch_imm_common(pc, addr, SZ_1M);
522 	if (offset >= SZ_1M)
523 		return AARCH64_BREAK_FAULT;
524 
525 	switch (type) {
526 	case AARCH64_INSN_BRANCH_COMP_ZERO:
527 		insn = aarch64_insn_get_cbz_value();
528 		break;
529 	case AARCH64_INSN_BRANCH_COMP_NONZERO:
530 		insn = aarch64_insn_get_cbnz_value();
531 		break;
532 	default:
533 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
534 		return AARCH64_BREAK_FAULT;
535 	}
536 
537 	switch (variant) {
538 	case AARCH64_INSN_VARIANT_32BIT:
539 		break;
540 	case AARCH64_INSN_VARIANT_64BIT:
541 		insn |= AARCH64_INSN_SF_BIT;
542 		break;
543 	default:
544 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
545 		return AARCH64_BREAK_FAULT;
546 	}
547 
548 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
549 
550 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
551 					     offset >> 2);
552 }
553 
aarch64_insn_gen_cond_branch_imm(unsigned long pc,unsigned long addr,enum aarch64_insn_condition cond)554 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
555 				     enum aarch64_insn_condition cond)
556 {
557 	u32 insn;
558 	long offset;
559 
560 	offset = branch_imm_common(pc, addr, SZ_1M);
561 
562 	insn = aarch64_insn_get_bcond_value();
563 
564 	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
565 		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
566 		return AARCH64_BREAK_FAULT;
567 	}
568 	insn |= cond;
569 
570 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
571 					     offset >> 2);
572 }
573 
/* Build a HINT instruction with hint operand @op (the CRm:op2 field) */
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
578 
/* The canonical NOP: HINT #0 */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
583 
aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,enum aarch64_insn_branch_type type)584 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
585 				enum aarch64_insn_branch_type type)
586 {
587 	u32 insn;
588 
589 	switch (type) {
590 	case AARCH64_INSN_BRANCH_NOLINK:
591 		insn = aarch64_insn_get_br_value();
592 		break;
593 	case AARCH64_INSN_BRANCH_LINK:
594 		insn = aarch64_insn_get_blr_value();
595 		break;
596 	case AARCH64_INSN_BRANCH_RETURN:
597 		insn = aarch64_insn_get_ret_value();
598 		break;
599 	default:
600 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
601 		return AARCH64_BREAK_FAULT;
602 	}
603 
604 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
605 }
606 
aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,enum aarch64_insn_register base,enum aarch64_insn_register offset,enum aarch64_insn_size_type size,enum aarch64_insn_ldst_type type)607 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
608 				    enum aarch64_insn_register base,
609 				    enum aarch64_insn_register offset,
610 				    enum aarch64_insn_size_type size,
611 				    enum aarch64_insn_ldst_type type)
612 {
613 	u32 insn;
614 
615 	switch (type) {
616 	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
617 		insn = aarch64_insn_get_ldr_reg_value();
618 		break;
619 	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
620 		insn = aarch64_insn_get_str_reg_value();
621 		break;
622 	default:
623 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
624 		return AARCH64_BREAK_FAULT;
625 	}
626 
627 	insn = aarch64_insn_encode_ldst_size(size, insn);
628 
629 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
630 
631 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
632 					    base);
633 
634 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
635 					    offset);
636 }
637 
/*
 * Generate LDP/STP with pre- or post-index writeback for registers
 * @reg1/@reg2 at [@base, #@offset].
 *
 * @offset is a signed byte offset; it must be a multiple of the access
 * size and fit the scaled 7-bit immediate: [-256, 252] for the 32-bit
 * variant, [-512, 504] for the 64-bit variant.  Returns
 * AARCH64_BREAK_FAULT on any encoding error.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		/* imm7 is scaled by the 4-byte access size */
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		/* imm7 is scaled by the 8-byte access size */
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* imm7 holds offset / access-size */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
701 
aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,enum aarch64_insn_register base,enum aarch64_insn_register state,enum aarch64_insn_size_type size,enum aarch64_insn_ldst_type type)702 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
703 				   enum aarch64_insn_register base,
704 				   enum aarch64_insn_register state,
705 				   enum aarch64_insn_size_type size,
706 				   enum aarch64_insn_ldst_type type)
707 {
708 	u32 insn;
709 
710 	switch (type) {
711 	case AARCH64_INSN_LDST_LOAD_EX:
712 		insn = aarch64_insn_get_load_ex_value();
713 		break;
714 	case AARCH64_INSN_LDST_STORE_EX:
715 		insn = aarch64_insn_get_store_ex_value();
716 		break;
717 	default:
718 		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
719 		return AARCH64_BREAK_FAULT;
720 	}
721 
722 	insn = aarch64_insn_encode_ldst_size(size, insn);
723 
724 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
725 					    reg);
726 
727 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
728 					    base);
729 
730 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
731 					    AARCH64_INSN_REG_ZR);
732 
733 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
734 					    state);
735 }
736 
/*
 * Encode the PRFM operation <type><target><policy> into the Rt field
 * of @insn (prfop lives where Rt normally is).  Returns
 * AARCH64_BREAK_FAULT for unknown type/target/policy values.
 */
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	if (type == AARCH64_INSN_PRFM_TYPE_PLI) {
		imm_type = BIT(0);
	} else if (type == AARCH64_INSN_PRFM_TYPE_PST) {
		imm_type = BIT(1);
	} else if (type != AARCH64_INSN_PRFM_TYPE_PLD) {
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (target == AARCH64_INSN_PRFM_TARGET_L2) {
		imm_target = BIT(0);
	} else if (target == AARCH64_INSN_PRFM_TARGET_L3) {
		imm_target = BIT(1);
	} else if (target != AARCH64_INSN_PRFM_TARGET_L1) {
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	if (policy == AARCH64_INSN_PRFM_POLICY_STRM) {
		imm_policy = BIT(0);
	} else if (policy != AARCH64_INSN_PRFM_POLICY_KEEP) {
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
789 
/*
 * Generate PRFM (immediate) with zero offset: a prefetch at [@base]
 * with the prfm<type><target><policy> operation encoded in Rt.
 */
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	/* PRFM uses the 64-bit (size == 0b11) encoding */
	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Zero unsigned scaled (imm12) offset */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
806 
/*
 * Generate ADD/SUB (immediate), optionally setting flags.
 *
 * @imm must be a non-negative value expressible either as a plain
 * 12-bit immediate or as a 12-bit immediate shifted left by 12 (the
 * "LSL #12" form); anything else returns AARCH64_BREAK_FAULT.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		/* Use the shifted form: imm12 = imm >> 12, sh bit set */
		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
867 
/*
 * Generate a bitfield-move instruction (BFM/UBFM/SBFM) with rotate
 * amount @immr and width/position @imms.
 *
 * The 32-bit variant limits immr/imms to 5 bits; the 64-bit variant
 * allows 6 bits and additionally requires the N bit set.  Returns
 * AARCH64_BREAK_FAULT on any encoding error.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* 64-bit bitfield ops require N == 1 */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
922 
/*
 * Generate MOVZ/MOVK/MOVN writing 16-bit @imm into @dst at bit
 * position @shift.
 *
 * @shift must be a multiple of 16 valid for the variant (0/16 for
 * 32-bit, 0/16/32/48 for 64-bit) and @imm must fit in 16 bits.
 * Returns AARCH64_BREAK_FAULT on any encoding error.
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* hw field (bits [22:21]) selects which 16-bit chunk is written */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
977 
/*
 * Generate ADD/SUB (shifted register): dst = src +/- (reg LSL shift),
 * optionally setting flags.
 *
 * @shift must be less than the register width (32 or 64 depending on
 * the variant).  Returns AARCH64_BREAK_FAULT on any encoding error.
 */
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	/* imm6 holds the shift amount */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1035 
aarch64_insn_gen_data1(enum aarch64_insn_register dst,enum aarch64_insn_register src,enum aarch64_insn_variant variant,enum aarch64_insn_data1_type type)1036 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
1037 			   enum aarch64_insn_register src,
1038 			   enum aarch64_insn_variant variant,
1039 			   enum aarch64_insn_data1_type type)
1040 {
1041 	u32 insn;
1042 
1043 	switch (type) {
1044 	case AARCH64_INSN_DATA1_REVERSE_16:
1045 		insn = aarch64_insn_get_rev16_value();
1046 		break;
1047 	case AARCH64_INSN_DATA1_REVERSE_32:
1048 		insn = aarch64_insn_get_rev32_value();
1049 		break;
1050 	case AARCH64_INSN_DATA1_REVERSE_64:
1051 		if (variant != AARCH64_INSN_VARIANT_64BIT) {
1052 			pr_err("%s: invalid variant for reverse64 %d\n",
1053 			       __func__, variant);
1054 			return AARCH64_BREAK_FAULT;
1055 		}
1056 		insn = aarch64_insn_get_rev64_value();
1057 		break;
1058 	default:
1059 		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
1060 		return AARCH64_BREAK_FAULT;
1061 	}
1062 
1063 	switch (variant) {
1064 	case AARCH64_INSN_VARIANT_32BIT:
1065 		break;
1066 	case AARCH64_INSN_VARIANT_64BIT:
1067 		insn |= AARCH64_INSN_SF_BIT;
1068 		break;
1069 	default:
1070 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1071 		return AARCH64_BREAK_FAULT;
1072 	}
1073 
1074 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1075 
1076 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1077 }
1078 
aarch64_insn_gen_data2(enum aarch64_insn_register dst,enum aarch64_insn_register src,enum aarch64_insn_register reg,enum aarch64_insn_variant variant,enum aarch64_insn_data2_type type)1079 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1080 			   enum aarch64_insn_register src,
1081 			   enum aarch64_insn_register reg,
1082 			   enum aarch64_insn_variant variant,
1083 			   enum aarch64_insn_data2_type type)
1084 {
1085 	u32 insn;
1086 
1087 	switch (type) {
1088 	case AARCH64_INSN_DATA2_UDIV:
1089 		insn = aarch64_insn_get_udiv_value();
1090 		break;
1091 	case AARCH64_INSN_DATA2_SDIV:
1092 		insn = aarch64_insn_get_sdiv_value();
1093 		break;
1094 	case AARCH64_INSN_DATA2_LSLV:
1095 		insn = aarch64_insn_get_lslv_value();
1096 		break;
1097 	case AARCH64_INSN_DATA2_LSRV:
1098 		insn = aarch64_insn_get_lsrv_value();
1099 		break;
1100 	case AARCH64_INSN_DATA2_ASRV:
1101 		insn = aarch64_insn_get_asrv_value();
1102 		break;
1103 	case AARCH64_INSN_DATA2_RORV:
1104 		insn = aarch64_insn_get_rorv_value();
1105 		break;
1106 	default:
1107 		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1108 		return AARCH64_BREAK_FAULT;
1109 	}
1110 
1111 	switch (variant) {
1112 	case AARCH64_INSN_VARIANT_32BIT:
1113 		break;
1114 	case AARCH64_INSN_VARIANT_64BIT:
1115 		insn |= AARCH64_INSN_SF_BIT;
1116 		break;
1117 	default:
1118 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1119 		return AARCH64_BREAK_FAULT;
1120 	}
1121 
1122 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1123 
1124 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1125 
1126 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1127 }
1128 
aarch64_insn_gen_data3(enum aarch64_insn_register dst,enum aarch64_insn_register src,enum aarch64_insn_register reg1,enum aarch64_insn_register reg2,enum aarch64_insn_variant variant,enum aarch64_insn_data3_type type)1129 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1130 			   enum aarch64_insn_register src,
1131 			   enum aarch64_insn_register reg1,
1132 			   enum aarch64_insn_register reg2,
1133 			   enum aarch64_insn_variant variant,
1134 			   enum aarch64_insn_data3_type type)
1135 {
1136 	u32 insn;
1137 
1138 	switch (type) {
1139 	case AARCH64_INSN_DATA3_MADD:
1140 		insn = aarch64_insn_get_madd_value();
1141 		break;
1142 	case AARCH64_INSN_DATA3_MSUB:
1143 		insn = aarch64_insn_get_msub_value();
1144 		break;
1145 	default:
1146 		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1147 		return AARCH64_BREAK_FAULT;
1148 	}
1149 
1150 	switch (variant) {
1151 	case AARCH64_INSN_VARIANT_32BIT:
1152 		break;
1153 	case AARCH64_INSN_VARIANT_64BIT:
1154 		insn |= AARCH64_INSN_SF_BIT;
1155 		break;
1156 	default:
1157 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1158 		return AARCH64_BREAK_FAULT;
1159 	}
1160 
1161 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1162 
1163 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1164 
1165 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1166 					    reg1);
1167 
1168 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1169 					    reg2);
1170 }
1171 
aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,enum aarch64_insn_register src,enum aarch64_insn_register reg,int shift,enum aarch64_insn_variant variant,enum aarch64_insn_logic_type type)1172 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1173 					 enum aarch64_insn_register src,
1174 					 enum aarch64_insn_register reg,
1175 					 int shift,
1176 					 enum aarch64_insn_variant variant,
1177 					 enum aarch64_insn_logic_type type)
1178 {
1179 	u32 insn;
1180 
1181 	switch (type) {
1182 	case AARCH64_INSN_LOGIC_AND:
1183 		insn = aarch64_insn_get_and_value();
1184 		break;
1185 	case AARCH64_INSN_LOGIC_BIC:
1186 		insn = aarch64_insn_get_bic_value();
1187 		break;
1188 	case AARCH64_INSN_LOGIC_ORR:
1189 		insn = aarch64_insn_get_orr_value();
1190 		break;
1191 	case AARCH64_INSN_LOGIC_ORN:
1192 		insn = aarch64_insn_get_orn_value();
1193 		break;
1194 	case AARCH64_INSN_LOGIC_EOR:
1195 		insn = aarch64_insn_get_eor_value();
1196 		break;
1197 	case AARCH64_INSN_LOGIC_EON:
1198 		insn = aarch64_insn_get_eon_value();
1199 		break;
1200 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1201 		insn = aarch64_insn_get_ands_value();
1202 		break;
1203 	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1204 		insn = aarch64_insn_get_bics_value();
1205 		break;
1206 	default:
1207 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1208 		return AARCH64_BREAK_FAULT;
1209 	}
1210 
1211 	switch (variant) {
1212 	case AARCH64_INSN_VARIANT_32BIT:
1213 		if (shift & ~(SZ_32 - 1)) {
1214 			pr_err("%s: invalid shift encoding %d\n", __func__,
1215 			       shift);
1216 			return AARCH64_BREAK_FAULT;
1217 		}
1218 		break;
1219 	case AARCH64_INSN_VARIANT_64BIT:
1220 		insn |= AARCH64_INSN_SF_BIT;
1221 		if (shift & ~(SZ_64 - 1)) {
1222 			pr_err("%s: invalid shift encoding %d\n", __func__,
1223 			       shift);
1224 			return AARCH64_BREAK_FAULT;
1225 		}
1226 		break;
1227 	default:
1228 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1229 		return AARCH64_BREAK_FAULT;
1230 	}
1231 
1232 
1233 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1234 
1235 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1236 
1237 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1238 
1239 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1240 }
1241 
1242 /*
1243  * Decode the imm field of a branch, and return the byte offset as a
1244  * signed value (so it can be used when computing a new branch
1245  * target).
1246  */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		/*
		 * imm26, word-scaled: shift the field up so its top bit
		 * lands in bit 31, then arithmetic-shift back down,
		 * leaving a sign-extended value multiplied by 4.
		 */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		/* imm19, same trick: sign-extend (19 + 13 = 32) and * 4. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		/* imm14, same trick: sign-extend (14 + 18 = 32) and * 4. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
1270 
1271 /*
1272  * Encode the displacement of a branch in the imm field and return the
1273  * updated instruction.
1274  */
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction. @offset is a byte offset; all supported branch
 * forms scale it by 4.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	enum aarch64_insn_imm_type type;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		type = AARCH64_INSN_IMM_26;
	else if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		 aarch64_insn_is_bcond(insn))
		type = AARCH64_INSN_IMM_19;
	else if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		type = AARCH64_INSN_IMM_14;
	else
		BUG();	/* Unhandled instruction */

	return aarch64_insn_encode_immediate(type, insn, offset >> 2);
}
1293 
aarch64_insn_adrp_get_offset(u32 insn)1294 s32 aarch64_insn_adrp_get_offset(u32 insn)
1295 {
1296 	BUG_ON(!aarch64_insn_is_adrp(insn));
1297 	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
1298 }
1299 
/*
 * Re-encode the (page-scaled) byte offset of an ADRP instruction.
 */
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));

	/* ADRP immediates are in units of 4K pages. */
	offset >>= 12;
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset);
}
1306 
1307 /*
1308  * Extract the Op/CR data from a msr/mrs instruction.
1309  */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	/* The 16-bit system register encoding lives in bits [20:5]. */
	return (insn >> 5) & 0xffff;
}
1314 
bool aarch32_insn_is_wide(u32 insn)
{
	/*
	 * First halfwords >= 0xe800 introduce a 32-bit ("wide") Thumb-2
	 * encoding; anything below is a 16-bit instruction.
	 */
	return insn >= 0xe800;
}
1319 
1320 /*
1321  * Macros/defines for extracting register numbers from instruction.
1322  */
/*
 * Extract the 4-bit register number starting at bit @offset of @insn.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn >> offset) & 0xf;
}
1327 
1328 #define OPC2_MASK	0x7
1329 #define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	/* opc2 is the 3-bit field at bits [7:5] of an MCR instruction. */
	return (insn >> OPC2_OFFSET) & OPC2_MASK;
}
1334 
1335 #define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	/* CRm is the low nibble (bits [3:0]) of an MCR instruction. */
	return insn & CRM_MASK;
}
1340 
/* EQ: Z set */
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}
1345 
/* NE: Z clear */
static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}
1350 
/* CS: C set */
static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}
1355 
/* CC: C clear */
static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}
1360 
/* MI: N set */
static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}
1365 
/* PL: N clear */
static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}
1370 
/* VS: V set */
static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}
1375 
/* VC: V clear */
static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}
1380 
/* HI: C set and Z clear */
static bool __kprobes __check_hi(unsigned long pstate)
{
	/* Z sits one bit above C, so this clears C whenever Z is set */
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}
1386 
/* LS: C clear or Z set */
static bool __kprobes __check_ls(unsigned long pstate)
{
	/* Z sits one bit above C, so this clears C whenever Z is set */
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}
1392 
/* GE: N equals V */
static bool __kprobes __check_ge(unsigned long pstate)
{
	/* V sits three bits below N; XOR folds V into the N position */
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}
1398 
/* LT: N differs from V */
static bool __kprobes __check_lt(unsigned long pstate)
{
	/* V sits three bits below N; XOR folds V into the N position */
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}
1404 
/* GT: Z clear, and N equals V */
static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}
1413 
/* LE: Z set, or N differs from V */
static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}
1422 
/* AL: unconditional */
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
1427 
1428 /*
1429  * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
1430  * it behaves identically to 0b1110 ("al").
1431  */
/* Indexed by the 4-bit AArch32 condition-code field of an instruction. */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
1438 
range_of_ones(u64 val)1439 static bool range_of_ones(u64 val)
1440 {
1441 	/* Doesn't handle full ones or full zeroes */
1442 	u64 sval = val >> __ffs64(val);
1443 
1444 	/* One of Sean Eron Anderson's bithack tricks */
1445 	return ((sval + 1) & (sval)) == 0;
1446 }
1447 
/*
 * Encode @imm as a "logical immediate" (the N:immr:imms fields) into
 * @insn. A logical immediate is a contiguous run of ones, rotated,
 * and replicated across the register at a power-of-two element size;
 * values not representable that way (including all-zeroes and
 * all-ones) yield AARCH64_BREAK_FAULT.
 *
 * Two fixes versus the previous version (matching upstream commit
 * 579d1b3faa37 "arm64: insn: Fix two bugs in encoding 32-bit logical
 * immediates"):
 *  - the full-zeroes/full-ones check must be done against the element
 *    size: a 32-bit all-ones value (0xffffffff) used to slip through
 *    the 64-bit "!~imm" test and get mis-encoded;
 *  - the rotation must be computed with fls64(), since fls() truncates
 *    to 32 bits and gives a wrong result for 64-bit immediates whose
 *    run of zeroes sits above bit 31.
 */
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0.
		 * fls64, not fls: the run of zeroes may live above
		 * bit 31 when the element size is 64.
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
1543 
aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,enum aarch64_insn_variant variant,enum aarch64_insn_register Rn,enum aarch64_insn_register Rd,u64 imm)1544 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1545 				       enum aarch64_insn_variant variant,
1546 				       enum aarch64_insn_register Rn,
1547 				       enum aarch64_insn_register Rd,
1548 				       u64 imm)
1549 {
1550 	u32 insn;
1551 
1552 	switch (type) {
1553 	case AARCH64_INSN_LOGIC_AND:
1554 		insn = aarch64_insn_get_and_imm_value();
1555 		break;
1556 	case AARCH64_INSN_LOGIC_ORR:
1557 		insn = aarch64_insn_get_orr_imm_value();
1558 		break;
1559 	case AARCH64_INSN_LOGIC_EOR:
1560 		insn = aarch64_insn_get_eor_imm_value();
1561 		break;
1562 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1563 		insn = aarch64_insn_get_ands_imm_value();
1564 		break;
1565 	default:
1566 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1567 		return AARCH64_BREAK_FAULT;
1568 	}
1569 
1570 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1571 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1572 	return aarch64_encode_immediate(imm, variant, insn);
1573 }
1574 
aarch64_insn_gen_extr(enum aarch64_insn_variant variant,enum aarch64_insn_register Rm,enum aarch64_insn_register Rn,enum aarch64_insn_register Rd,u8 lsb)1575 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1576 			  enum aarch64_insn_register Rm,
1577 			  enum aarch64_insn_register Rn,
1578 			  enum aarch64_insn_register Rd,
1579 			  u8 lsb)
1580 {
1581 	u32 insn;
1582 
1583 	insn = aarch64_insn_get_extr_value();
1584 
1585 	switch (variant) {
1586 	case AARCH64_INSN_VARIANT_32BIT:
1587 		if (lsb > 31)
1588 			return AARCH64_BREAK_FAULT;
1589 		break;
1590 	case AARCH64_INSN_VARIANT_64BIT:
1591 		if (lsb > 63)
1592 			return AARCH64_BREAK_FAULT;
1593 		insn |= AARCH64_INSN_SF_BIT;
1594 		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1595 		break;
1596 	default:
1597 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1598 		return AARCH64_BREAK_FAULT;
1599 	}
1600 
1601 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1602 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1603 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1604 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1605 }
1606