/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>

static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	insn_size_t opcode;

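	/*
	 * The checks below distinguish the ISA mode by the low PC bits: an
	 * SHmedia target has bit 0 set (the instruction word itself sits at
	 * pc & ~3), while an SHcompact target has bit 0 clear.  E.g. a pc of
	 * 0x80001005 means "SHmedia instruction at 0x80001004".
	 */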
	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, read the opcode
			 * directly; if the address is bad, this dereference
			 * simply faults again.
			 */
			*result_opcode = *(insn_size_t *)aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.  */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

static int address_is_sign_extended(__u64 a)
{
	__u64 b;
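	/*
	 * An effective address is valid only if bits 63:32 are a copy of
	 * bit 31, i.e. the value survives a 32-bit sign-extension round
	 * trip.  For example 0xffffffff80000000 passes the check below,
	 * while 0x0000000180000000 does not.
	 */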
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
				      insn_size_t opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	__u64 base_address, addr;
	int basereg;

	switch (1 << width_shift) {
	case 1: inc_unaligned_byte_access(); break;
	case 2: inc_unaligned_word_access(); break;
	case 4: inc_unaligned_dword_access(); break;
	case 8: inc_unaligned_multi_access(); break;
	}

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
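	/*
	 * Two addressing modes are handled: displacement (a 10-bit signed
	 * immediate in opcode bits 19:10, scaled by the access width) and
	 * register-indexed (an offset register number in the same bits).
	 * E.g. with width_shift == 2, a raw displacement of 0x3ff
	 * sign-extends to -1, giving a byte offset of -4.
	 */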
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = sign_extend64(displacement, 9);
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr))
		return -1;

	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		inc_unaligned_user_access();

		if (addr >= TASK_SIZE)
			return -1;
	} else
		inc_unaligned_kernel_access();

	*address = addr;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
	unaligned_fixups_notify(current, opcode, regs);

	return 0;
}

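/* These two helpers assemble/disassemble the halfword one byte at a time,
   so only aligned byte accesses ever reach memory, whatever the alignment
   of 'address'. */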
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}

static int misaligned_load(struct pt_regs *regs,
			   insn_size_t opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

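		/*
		 * ldlo/ldhi are SHmedia's misaligned-access primitives: each
		 * effectively picks up only the bytes of the operand that lie
		 * within a single aligned word (quadword for the .q forms),
		 * so OR-ing the two partial results reassembles the value.
		 */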
		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

static int misaligned_store(struct pt_regs *regs,
			    insn_size_t opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

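		/*
		 * stlo/sthi are the store-side counterparts of ldlo/ldhi:
		 * each writes only the bytes of the value that fall within
		 * its aligned word (quadword for .q), so the pair together
		 * covers the whole misaligned datum.
		 */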
		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			   insn_size_t opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32*) &buffer;
		bufhi = *(1 + (__u32*) &buffer);

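		/*
		 * buflo/bufhi are the two 32-bit halves of the datum in
		 * memory order.  A paired single load writes them to the
		 * register pair in that order; a double load keeps the
		 * pair in a fixed half order, so on little-endian the two
		 * halves have to be swapped.
		 */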
		switch (width_shift) {
		case 2:
			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
			   insn_size_t opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs, so an unexpected width stores a
		   recognisable poison value. */
		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32*) &buffer = buflo;
		*(1 + (__u32*) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die ("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fixup(struct pt_regs *regs)
{
	insn_size_t opcode;
	int error;
	int major, minor;
	unsigned int user_action;

	user_action = unaligned_user_action();
	if (!(user_action & UM_FIXUP))
		return -1;

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;
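	/*
	 * The case labels below are written as (opcode byte >> 2) so they
	 * can be read off against the documented opcode encodings: e.g.
	 * LD.W's byte 0x84 gives major == 0x84 >> 2 == 0x21, which is what
	 * bits 31:26 of the instruction decode to.
	 */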

	switch (major) {
		case (0x84>>2): /* LD.W */
			error = misaligned_load(regs, opcode, 1, 1, 1);
			break;
		case (0xb0>>2): /* LD.UW */
			error = misaligned_load(regs, opcode, 1, 1, 0);
			break;
		case (0x88>>2): /* LD.L */
			error = misaligned_load(regs, opcode, 1, 2, 1);
			break;
		case (0x8c>>2): /* LD.Q */
			error = misaligned_load(regs, opcode, 1, 3, 0);
			break;

		case (0xa4>>2): /* ST.W */
			error = misaligned_store(regs, opcode, 1, 1);
			break;
		case (0xa8>>2): /* ST.L */
			error = misaligned_store(regs, opcode, 1, 2);
			break;
		case (0xac>>2): /* ST.Q */
			error = misaligned_store(regs, opcode, 1, 3);
			break;

		case (0x40>>2): /* indexed loads */
			switch (minor) {
				case 0x1: /* LDX.W */
					error = misaligned_load(regs, opcode, 0, 1, 1);
					break;
				case 0x5: /* LDX.UW */
					error = misaligned_load(regs, opcode, 0, 1, 0);
					break;
				case 0x2: /* LDX.L */
					error = misaligned_load(regs, opcode, 0, 2, 1);
					break;
				case 0x3: /* LDX.Q */
					error = misaligned_load(regs, opcode, 0, 3, 0);
					break;
				default:
					error = -1;
					break;
			}
			break;

		case (0x60>>2): /* indexed stores */
			switch (minor) {
				case 0x1: /* STX.W */
					error = misaligned_store(regs, opcode, 0, 1);
					break;
				case 0x2: /* STX.L */
					error = misaligned_store(regs, opcode, 0, 2);
					break;
				case 0x3: /* STX.Q */
					error = misaligned_store(regs, opcode, 0, 3);
					break;
				default:
					error = -1;
					break;
			}
			break;

		case (0x94>>2): /* FLD.S */
			error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
			break;
		case (0x98>>2): /* FLD.P */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
			break;
		case (0x9c>>2): /* FLD.D */
			error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
			break;
		case (0x1c>>2): /* floating indexed loads */
			switch (minor) {
			case 0x8: /* FLDX.S */
				error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FLDX.P */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FLDX.D */
				error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;
		case (0xb4>>2): /* FST.S */
			error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
			break;
		case (0xb8>>2): /* FST.P */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
			break;
		case (0xbc>>2): /* FST.D */
			error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
			break;
		case (0x3c>>2): /* floating indexed stores */
			switch (minor) {
			case 0x8: /* FSTX.S */
				error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
				break;
			case 0xd: /* FSTX.P */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
				break;
			case 0x9: /* FSTX.D */
				error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
				break;
			default:
				error = -1;
				break;
			}
			break;

		default:
			/* Fault */
			error = -1;
			break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}

static void do_unhandled_exception(int signr, char *str, unsigned long error,
				   struct pt_regs *regs)
{
	if (user_mode(regs))
		force_sig(signr, current);

	die_if_no_fixup(str, regs, error);
}

#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(signr, str, error_code, regs); \
}
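/* For example, the first invocation below expands to:
 *   asmlinkage void do_illegal_slot_inst(unsigned long error_code,
 *                                        struct pt_regs *regs)
 *   { do_unhandled_exception(SIGILL, "illegal slot instruction",
 *                            error_code, regs); }
 */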

DO_ERROR(SIGILL,  "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
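/* Example: LD.W has major opcode 0x21, so its entry is
   shmedia_opcode_table[0x21] == 0x55555555; whatever the minor opcode, the
   selected bit-pair is 1 == OPCODE_USER_VALID. */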
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};

/* Workaround SH5-101 cut2 silicon defect #2815 :
   in some situations, inter-mode branches from SHcompact -> SHmedia
   which should take ITLBMISS or EXECPROT exceptions at the target
   falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	unsigned long index, shift;
	unsigned long major, minor, combined;
	unsigned long reserved_field;
	int opcode_state;
	int get_user_error;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;

	/* SHcompact is not handled */
	if (unlikely((pc & 3) == 0))
		goto out;

	/* SHmedia : check for defect.  This requires executable vmas
	   to be readable too. */
	aligned_pc = pc & ~3;
	if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t)))
		get_user_error = -EFAULT;
	else
		get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);

	if (get_user_error < 0) {
		/*
		 * Error trying to read opcode.  This typically means a
		 * real fault, not a RESINST any more.  So change the
		 * codes.
		 */
		exception_name = "address error (exec)";
		signr = SIGSEGV;
		goto out;
	}

	/* These bits are currently reserved as zero in all valid opcodes */
	reserved_field = opcode & 0xf;
	if (unlikely(reserved_field))
		goto out;	/* invalid opcode */

	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;
	combined = (major << 4) | minor;
	index = major;
	shift = minor << 1;
	opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
	switch (opcode_state) {
	case OPCODE_INVALID:
		/* Trap. */
		break;
	case OPCODE_USER_VALID:
		/*
		 * Restart the instruction: the branch to the instruction
		 * will now be from an RTE not from SHcompact so the
		 * silicon defect won't be triggered.
		 */
		return;
	case OPCODE_PRIV_VALID:
		if (!user_mode(regs)) {
			/*
			 * Should only ever get here if a module has
			 * SHcompact code inside it.  If so, the same fix
			 * up is needed.
			 */
			return; /* same reason */
		}

		/*
		 * Otherwise, user mode trying to execute a privileged
		 * instruction - fall through to trap.
		 */
		break;
	case OPCODE_CTRL_REG:
		/* If in privileged mode, return as above. */
		if (!user_mode(regs))
			return;

		/* In user mode ... */
		if (combined == 0x9f) { /* GETCON */
			unsigned long regno = (opcode >> 20) & 0x3f;

			if (regno >= 62)
				return;

			/* reserved/privileged control register => trap */
		} else if (combined == 0x1bf) { /* PUTCON */
			unsigned long regno = (opcode >> 4) & 0x3f;

			if (regno >= 62)
				return;

			/* reserved/privileged control register => trap */
		}

		break;
	default:
		/* Fall through to trap. */
		break;
	}

out:
	do_unhandled_exception(signr, exception_name, error_code, regs);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	die_if_kernel("exception", regs, ex);
}

asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0)
		do_unhandled_exception(SIGSEGV, "address error(load)",
				       error_code, regs);
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0)
		do_unhandled_exception(SIGSEGV, "address error(store)",
				error_code, regs);
}

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4)
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

void per_cpu_trap_init(void)
{
	/* Nothing to do for now, VBR initialization later. */
}