1 /*
2  * Copyright (c) 2020 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <errno.h>
8 #include <kernel_internal.h>
9 #include <zephyr/toolchain.h>
10 #include <zephyr/debug/coredump.h>
11 #include <zephyr/sys/byteorder.h>
12 #include <zephyr/sys/util.h>
13 
14 #include "coredump_internal.h"
/*
 * Build-time selection of the coredump backend. Exactly one of the
 * CONFIG_DEBUG_COREDUMP_BACKEND_* options must be enabled; every byte
 * this file emits goes through this backend's API.
 */
#if defined(CONFIG_DEBUG_COREDUMP_BACKEND_LOGGING)
extern struct coredump_backend_api coredump_backend_logging;
static struct coredump_backend_api
	*backend_api = &coredump_backend_logging;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_FLASH_PARTITION)
extern struct coredump_backend_api coredump_backend_flash_partition;
static struct coredump_backend_api
	*backend_api = &coredump_backend_flash_partition;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_INTEL_ADSP_MEM_WINDOW)
extern struct coredump_backend_api coredump_backend_intel_adsp_mem_window;
static struct coredump_backend_api
	*backend_api = &coredump_backend_intel_adsp_mem_window;
#elif defined(CONFIG_DEBUG_COREDUMP_BACKEND_OTHER)
extern struct coredump_backend_api coredump_backend_other;
static struct coredump_backend_api
	*backend_api = &coredump_backend_other;
#else
#error "Need to select a coredump backend"
#endif
34 
35 #if defined(CONFIG_COREDUMP_DEVICE)
36 #include <zephyr/drivers/coredump.h>
37 #define DT_DRV_COMPAT zephyr_coredump
38 #endif
39 
#if defined(CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK)
/**
 * @brief Weak default for dumping a thread's privileged stack.
 *
 * Does nothing; architectures that implement privileged stack dumping
 * override this symbol with a real implementation.
 *
 * @param thread Thread whose privileged stack would be dumped (unused).
 */
__weak void arch_coredump_priv_stack_dump(struct k_thread *thread)
{
	/* Stub if architecture has not implemented this. */
	ARG_UNUSED(thread);
}
#endif /* CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK */
47 
dump_header(unsigned int reason)48 static void dump_header(unsigned int reason)
49 {
50 	struct coredump_hdr_t hdr = {
51 		.id = {'Z', 'E'},
52 		.hdr_version = COREDUMP_HDR_VER,
53 		.reason = sys_cpu_to_le16(reason),
54 	};
55 
56 	if (sizeof(uintptr_t) == 8) {
57 		hdr.ptr_size_bits = 6; /* 2^6 = 64 */
58 	} else if (sizeof(uintptr_t) == 4) {
59 		hdr.ptr_size_bits = 5; /* 2^5 = 32 */
60 	} else {
61 		hdr.ptr_size_bits = 0; /* Unknown */
62 	}
63 
64 	hdr.tgt_code = sys_cpu_to_le16(arch_coredump_tgt_code_get());
65 
66 	backend_api->buffer_output((uint8_t *)&hdr, sizeof(hdr));
67 }
68 
#if defined(CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_MIN) || \
	defined(CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_THREADS)
/*
 * Dump one thread's state: the k_thread struct itself and its stack,
 * so the debugger can examine them. This is the minimum information
 * needed per thread.
 */
static void dump_thread(struct k_thread *thread)
{
	uintptr_t begin;

	if (thread == NULL) {
		return;
	}

	/* The thread object itself. */
	begin = POINTER_TO_UINT(thread);
	coredump_memory_dump(begin, begin + sizeof(*thread));

	/* The thread's stack. */
	begin = thread->stack_info.start;
	coredump_memory_dump(begin, begin + thread->stack_info.size);

#if defined(CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK)
	/* For user-mode threads, let the arch dump the privileged stack. */
	if ((thread->base.user_options & K_USER) == K_USER) {
		arch_coredump_priv_stack_dump(thread);
	}
#endif /* CONFIG_DEBUG_COREDUMP_DUMP_THREAD_PRIV_STACK */
}
#endif
100 
#if defined(CONFIG_COREDUMP_DEVICE)
/* Invoke a coredump device's dump() callback so it can contribute its
 * own data to the dump.
 */
static void process_coredump_dev_memory(const struct device *dev)
{
	DEVICE_API_GET(coredump, dev)->dump(dev);
}
#endif
107 
/*
 * Walk every configured memory source and emit it through the backend:
 * linker-defined RAM regions, all thread objects and stacks, the
 * interrupt stack, and any devicetree coredump devices.
 */
void process_memory_region_list(void)
{
#ifdef CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_LINKER_RAM
	/* The region table is terminated by an entry with end == 0. */
	for (struct z_coredump_memory_region_t *region = z_coredump_memory_regions;
	     region->end != POINTER_TO_UINT(NULL); region++) {
		coredump_memory_dump(region->start, region->end);
	}
#endif

#ifdef CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_THREADS
	/*
	 * Content of _kernel.threads not being modified during dump
	 * capture so no need to lock z_thread_monitor_lock.
	 */
	for (struct k_thread *t = _kernel.threads; t != NULL;
	     t = t->next_thread) {
		dump_thread(t);
	}

	/* Also add interrupt stack, in case error occurred in an interrupt */
	char *irq_stack = _kernel.cpus[0].irq_stack;

	coredump_memory_dump(POINTER_TO_UINT(irq_stack) - CONFIG_ISR_STACK_SIZE,
			     POINTER_TO_UINT(irq_stack));
#endif /* CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_THREADS */

#if defined(CONFIG_COREDUMP_DEVICE)
#define MY_FN(inst) process_coredump_dev_memory(DEVICE_DT_INST_GET(inst));
	DT_INST_FOREACH_STATUS_OKAY(MY_FN)
#endif
}
150 
#ifdef CONFIG_DEBUG_COREDUMP_THREADS_METADATA
/*
 * Emit the threads-metadata section: a section header followed by a
 * raw copy of _kernel, which contains the thread list.
 */
static void dump_threads_metadata(void)
{
	struct coredump_threads_meta_hdr_t hdr = {
		.id = THREADS_META_HDR_ID,
		.hdr_version = THREADS_META_HDR_VER,
		.num_bytes = sizeof(_kernel),
	};

	coredump_buffer_output((uint8_t *)&hdr, sizeof(hdr));
	coredump_buffer_output((uint8_t *)&_kernel, sizeof(_kernel));
}
#endif /* CONFIG_DEBUG_COREDUMP_THREADS_METADATA */
166 
coredump(unsigned int reason,const struct arch_esf * esf,struct k_thread * thread)167 void coredump(unsigned int reason, const struct arch_esf *esf,
168 	      struct k_thread *thread)
169 {
170 	z_coredump_start();
171 
172 	dump_header(reason);
173 
174 	if (esf != NULL) {
175 		arch_coredump_info_dump(esf);
176 	}
177 
178 #ifdef CONFIG_DEBUG_COREDUMP_THREADS_METADATA
179 	dump_threads_metadata();
180 #endif
181 
182 	if (thread != NULL) {
183 #ifdef CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_MIN
184 		dump_thread(thread);
185 #endif
186 	}
187 
188 	process_memory_region_list();
189 
190 	z_coredump_end();
191 }
192 
/* Tell the selected backend that a new coredump session is starting. */
void z_coredump_start(void)
{
	backend_api->start();
}
197 
/* Tell the selected backend that the coredump session is complete. */
void z_coredump_end(void)
{
	backend_api->end();
}
202 
coredump_buffer_output(uint8_t * buf,size_t buflen)203 void coredump_buffer_output(uint8_t *buf, size_t buflen)
204 {
205 	if ((buf == NULL) || (buflen == 0)) {
206 		/* Invalid buffer, skip */
207 		return;
208 	}
209 
210 	backend_api->buffer_output(buf, buflen);
211 }
212 
coredump_memory_dump(uintptr_t start_addr,uintptr_t end_addr)213 void coredump_memory_dump(uintptr_t start_addr, uintptr_t end_addr)
214 {
215 	struct coredump_mem_hdr_t m;
216 	size_t len;
217 
218 	if ((start_addr == POINTER_TO_UINT(NULL)) ||
219 	    (end_addr == POINTER_TO_UINT(NULL))) {
220 		return;
221 	}
222 
223 	if (start_addr >= end_addr) {
224 		return;
225 	}
226 
227 	len = end_addr - start_addr;
228 
229 	m.id = COREDUMP_MEM_HDR_ID;
230 	m.hdr_version = COREDUMP_MEM_HDR_VER;
231 
232 	if (sizeof(uintptr_t) == 8) {
233 		m.start	= sys_cpu_to_le64(start_addr);
234 		m.end = sys_cpu_to_le64(end_addr);
235 	} else if (sizeof(uintptr_t) == 4) {
236 		m.start	= sys_cpu_to_le32(start_addr);
237 		m.end = sys_cpu_to_le32(end_addr);
238 	}
239 
240 	coredump_buffer_output((uint8_t *)&m, sizeof(m));
241 
242 	coredump_buffer_output((uint8_t *)start_addr, len);
243 }
244 
coredump_query(enum coredump_query_id query_id,void * arg)245 int coredump_query(enum coredump_query_id query_id, void *arg)
246 {
247 	int ret;
248 
249 	if (backend_api->query == NULL) {
250 		ret = -ENOTSUP;
251 	} else {
252 		ret = backend_api->query(query_id, arg);
253 	}
254 
255 	return ret;
256 }
257 
coredump_cmd(enum coredump_cmd_id cmd_id,void * arg)258 int coredump_cmd(enum coredump_cmd_id cmd_id, void *arg)
259 {
260 	int ret;
261 
262 	if (backend_api->cmd == NULL) {
263 		ret = -ENOTSUP;
264 	} else {
265 		ret = backend_api->cmd(cmd_id, arg);
266 	}
267 
268 	return ret;
269 }
270