/*
 * Originally written by Glenn Engel, Lake Stevens Instrument Division
 *
 * Contributed by HP Systems
 *
 * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
 * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
 *
 * Copyright (C) 1995 Andreas Busse
 *
 * Copyright (C) 2003 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * Copyright (C) 2004-2005 MontaVista Software Inc.
 * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
 *
 * Copyright (C) 2007-2008 Wind River Systems, Inc.
 * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/ptrace.h>	/* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <linux/uaccess.h>

static struct hard_trap_info {
	unsigned char tt;	/* Trap type code for MIPS R3xxx and R4xxx */
	unsigned char signo;	/* Signal that we map this trap into */
} hard_trap_info[] = {
	{ 6, SIGBUS },		/* instruction bus error */
	{ 7, SIGBUS },		/* data bus error */
	{ 9, SIGTRAP },		/* break */
	/* { 11, SIGILL }, */	/* CPU unusable */
	{ 12, SIGFPE },		/* overflow */
	{ 13, SIGTRAP },	/* trap */
	{ 14, SIGSEGV },	/* virtual instruction cache coherency */
	{ 15, SIGFPE },		/* floating point exception */
	{ 23, SIGSEGV },	/* watch */
	{ 31, SIGSEGV },	/* virtual data cache coherency */
	{ 0, 0}			/* Must be last */
};
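
/*
 * Trap codes that are not listed in hard_trap_info are mapped to SIGHUP
 * by compute_signal() below.
 */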

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
	{ "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
	{ "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
	{ "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
	{ "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
	{ "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
	{ "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
	{ "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
	{ "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
	{ "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
	{ "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
	{ "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
	{ "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
	{ "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
	{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
	{ "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
	{ "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
	{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
	{ "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
	{ "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
	{ "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
	{ "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
	{ "f0", GDB_SIZEOF_REG, 0 },
	{ "f1", GDB_SIZEOF_REG, 1 },
	{ "f2", GDB_SIZEOF_REG, 2 },
	{ "f3", GDB_SIZEOF_REG, 3 },
	{ "f4", GDB_SIZEOF_REG, 4 },
	{ "f5", GDB_SIZEOF_REG, 5 },
	{ "f6", GDB_SIZEOF_REG, 6 },
	{ "f7", GDB_SIZEOF_REG, 7 },
	{ "f8", GDB_SIZEOF_REG, 8 },
	{ "f9", GDB_SIZEOF_REG, 9 },
	{ "f10", GDB_SIZEOF_REG, 10 },
	{ "f11", GDB_SIZEOF_REG, 11 },
	{ "f12", GDB_SIZEOF_REG, 12 },
	{ "f13", GDB_SIZEOF_REG, 13 },
	{ "f14", GDB_SIZEOF_REG, 14 },
	{ "f15", GDB_SIZEOF_REG, 15 },
	{ "f16", GDB_SIZEOF_REG, 16 },
	{ "f17", GDB_SIZEOF_REG, 17 },
	{ "f18", GDB_SIZEOF_REG, 18 },
	{ "f19", GDB_SIZEOF_REG, 19 },
	{ "f20", GDB_SIZEOF_REG, 20 },
	{ "f21", GDB_SIZEOF_REG, 21 },
	{ "f22", GDB_SIZEOF_REG, 22 },
	{ "f23", GDB_SIZEOF_REG, 23 },
	{ "f24", GDB_SIZEOF_REG, 24 },
	{ "f25", GDB_SIZEOF_REG, 25 },
	{ "f26", GDB_SIZEOF_REG, 26 },
	{ "f27", GDB_SIZEOF_REG, 27 },
	{ "f28", GDB_SIZEOF_REG, 28 },
	{ "f29", GDB_SIZEOF_REG, 29 },
	{ "f30", GDB_SIZEOF_REG, 30 },
	{ "f31", GDB_SIZEOF_REG, 31 },
	{ "fsr", GDB_SIZEOF_REG, 0 },
	{ "fir", GDB_SIZEOF_REG, 0 },
};
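
/*
 * GDB register numbering as used by dbg_get_reg()/dbg_set_reg() below:
 * registers 0-37 map straight onto struct pt_regs via the offsets above,
 * 38-69 are the FP registers f0-f31 (their third field is the FPR index,
 * not an offset), 70 is fcr31/fsr and 71 is fir.
 */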

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	int fp_reg;

	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1 && regno < 38) {
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
		/* FP registers 38 -> 69 */
		if (!(regs->cp0_status & ST0_CU1))
			return 0;
		if (regno == 70) {
			/* Process the fcr31/fsr (register 70) */
			memcpy((void *)&current->thread.fpu.fcr31, mem,
			       dbg_reg_def[regno].size);
			goto out_save;
		} else if (regno == 71) {
			/* Ignore the fir (register 71) */
			goto out_save;
		}
		fp_reg = dbg_reg_def[regno].offset;
		memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
		       dbg_reg_def[regno].size);
out_save:
		restore_fp(current);
	}

	return 0;
}

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	int fp_reg;

	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1 && regno < 38) {
		/* First 38 registers */
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
		/* FP registers 38 -> 69 */
		if (!(regs->cp0_status & ST0_CU1))
			goto out;
		save_fp(current);
		if (regno == 70) {
			/* Process the fcr31/fsr (register 70) */
			memcpy(mem, (void *)&current->thread.fpu.fcr31,
			       dbg_reg_def[regno].size);
			goto out;
		} else if (regno == 71) {
			/* Ignore the fir (register 71) */
			memset(mem, 0, dbg_reg_def[regno].size);
			goto out;
		}
		fp_reg = dbg_reg_def[regno].offset;
		memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
		       dbg_reg_def[regno].size);
	}

out:
	return dbg_reg_def[regno].name;

}

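/*
 * The address of the break instruction below is exported as "breakinst" so
 * that kgdb_mips_notify() can recognise the compiled-in breakpoint and
 * advance the EPC past it.
 */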
void arch_kgdb_breakpoint(void)
{
	__asm__ __volatile__(
		".globl breakinst\n\t"
		".set\tnoreorder\n\t"
		"nop\n"
		"breakinst:\tbreak\n\t"
		"nop\n\t"
		".set\treorder");
}

static void kgdb_call_nmi_hook(void *ignored)
{
	mm_segment_t old_fs;

	old_fs = get_fs();
	set_fs(get_ds());

	kgdb_nmicallback(raw_smp_processor_id(), NULL);

	set_fs(old_fs);
}

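/*
 * Interrupts are briefly enabled here, presumably because smp_call_function()
 * must not be called with interrupts disabled; the wait argument is 0, so we
 * do not block until the other CPUs have entered the debugger.
 */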
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}

static int compute_signal(int tt)
{
	struct hard_trap_info *ht;

	for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
		if (ht->tt == tt)
			return ht->signo;

	return SIGHUP;		/* default for things we don't know about */
}

/*
 * Similar to regs_to_gdb_regs() except that the process is sleeping, so we
 * may not be able to get all the info.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	int reg;
#if (KGDB_GDB_REG_SIZE == 32)
	u32 *ptr = (u32 *)gdb_regs;
#else
	u64 *ptr = (u64 *)gdb_regs;
#endif

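	/*
	 * Registers zero, at, v0-v1, a0-a3 and t0-t7 (gdb regs 0-15) are not
	 * preserved across a context switch, so report them as zero.
	 */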
	for (reg = 0; reg < 16; reg++)
		*(ptr++) = 0;

	/* S0 - S7 */
	*(ptr++) = p->thread.reg16;
	*(ptr++) = p->thread.reg17;
	*(ptr++) = p->thread.reg18;
	*(ptr++) = p->thread.reg19;
	*(ptr++) = p->thread.reg20;
	*(ptr++) = p->thread.reg21;
	*(ptr++) = p->thread.reg22;
	*(ptr++) = p->thread.reg23;

	for (reg = 24; reg < 28; reg++)
		*(ptr++) = 0;

	/* GP, SP, FP, RA */
	*(ptr++) = (long)p;
	*(ptr++) = p->thread.reg29;
	*(ptr++) = p->thread.reg30;
	*(ptr++) = p->thread.reg31;

	*(ptr++) = p->thread.cp0_status;

	/* lo, hi */
	*(ptr++) = 0;
	*(ptr++) = 0;

	/*
	 * BadVAddr, Cause
	 * Ideally these would come from the last exception frame up the stack
	 * but that requires unwinding, otherwise we can't know much for sure.
	 */
	*(ptr++) = 0;
	*(ptr++) = 0;

	/*
	 * PC
	 * use return address (RA), i.e. the moment after return from resume()
	 */
	*(ptr++) = p->thread.reg31;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->cp0_epc = pc;
}

/*
 * Called via the die notifier chain (and from kgdb_ll_trap()) when the
 * kernel takes a trap.  If KGDB is enabled, try to fall into the debugger.
 */
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
			    void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
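	/* The trap type is the ExcCode field (bits [6:2]) of CP0 Cause. */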
	int trap = (regs->cp0_cause & 0x7c) >> 2;
	mm_segment_t old_fs;

#ifdef CONFIG_KPROBES
	/*
	 * Return immediately if the kprobes fault notifier has set
	 * DIE_PAGE_FAULT.
	 */
	if (cmd == DIE_PAGE_FAULT)
		return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	/* Kernel mode. Set correct address limit */
	old_fs = get_fs();
	set_fs(get_ds());

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
		set_fs(old_fs);
		return NOTIFY_DONE;
	}

	if (atomic_read(&kgdb_setting_breakpoint))
		if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
			regs->cp0_epc += 4;

	/* In SMP mode, __flush_cache_all does IPI */
	local_irq_enable();
	__flush_cache_all();

	set_fs(old_fs);
	return NOTIFY_STOP;
}

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
int kgdb_ll_trap(int cmd, const char *str,
		 struct pt_regs *regs, long err, int trap, int sig)
{
	struct die_args args = {
		.regs = regs,
		.str = str,
		.err = err,
		.trapnr = trap,
		.signr = sig,
	};

	if (!kgdb_io_module_registered)
		return NOTIFY_DONE;

	return kgdb_mips_notify(NULL, cmd, &args);
}
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_mips_notify,
};

/*
 * Handle the 'c' (continue) command; an optional argument gives the address
 * to resume execution at.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	char *ptr;
	unsigned long address;

	switch (remcom_in_buffer[0]) {
	case 'c':
		/* handle the optional parameter */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &address))
			regs->cp0_epc = address;

		return 0;
	}

	return -1;
}

struct kgdb_arch arch_kgdb_ops;

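/*
 * Publish the encoding of the MIPS "break" instruction as the software
 * breakpoint instruction and hook this architecture into the die notifier
 * chain.  The kgdb core copies gdb_bpt_instr over the target address when
 * it plants a software breakpoint.
 */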
int kgdb_arch_init(void)
{
	union mips_instruction insn = {
		.r_format = {
			.opcode = spec_op,
			.func = break_op,
		}
	};
	memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);

	register_die_notifier(&kgdb_notifier);

	return 0;
}

/*
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}
