// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "soc/soc_memory_layout.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task_snapshot.h"
#include "esp_rom_sys.h"
#include "esp_core_dump_port.h"
#include "esp_core_dump_common.h"
#include "core_dump_elf.h"
#include "core_dump_binary.h"

const static DRAM_ATTR char TAG[] __attribute__((unused)) = "esp_core_dump_common";

#if CONFIG_ESP_COREDUMP_ENABLE

#define COREDUMP_GET_MEMORY_SIZE(end, start) ((end) - (start))

/**
 * @brief Memory regions to dump, defined by the linker script.
 */
extern int _coredump_dram_start;
extern int _coredump_dram_end;
extern int _coredump_iram_start;
extern int _coredump_iram_end;
extern int _coredump_rtc_start;
extern int _coredump_rtc_end;
extern int _coredump_rtc_fast_start;
extern int _coredump_rtc_fast_end;

/**
 * @brief In menuconfig, it is possible to specify a specific stack size for
 * core dump generation.
 */
#if CONFIG_ESP_COREDUMP_STACK_SIZE > 0

/**
 * @brief If a stack size has been specified for core dump generation, create
 * a stack that will be used during the whole core dump generation.
 */
#if LOG_LOCAL_LEVEL >= ESP_LOG_DEBUG
/* Increase stack size in verbose mode */
#define ESP_COREDUMP_STACK_SIZE (CONFIG_ESP_COREDUMP_STACK_SIZE + 100)
#else
#define ESP_COREDUMP_STACK_SIZE CONFIG_ESP_COREDUMP_STACK_SIZE
#endif

#define COREDUMP_STACK_FILL_BYTE (0xa5U)

/* Dedicated stack used during core dump generation, plus the aligned stack
 * pointer derived from it and a backup of the stack pointer that was active
 * before the switch. */
static uint8_t s_coredump_stack[ESP_COREDUMP_STACK_SIZE];
static uint8_t* s_core_dump_sp = NULL;
static uint8_t* s_core_dump_backup = NULL;

/**
 * @brief Function setting up the core dump stack.
 *
 * @note This function **must** be inlined as it modifies the
 * stack pointer register.
 */
FORCE_INLINE_ATTR void esp_core_dump_setup_stack(void)
{
    s_core_dump_sp = (uint8_t *)((uint32_t)(s_coredump_stack + ESP_COREDUMP_STACK_SIZE - 1) & ~0xf);
    memset(s_coredump_stack, COREDUMP_STACK_FILL_BYTE, ESP_COREDUMP_STACK_SIZE);

    /* Watchpoint 1 is used for task stack overflow detection; it could be
     * re-used here to guard the core dump stack, as it is no longer needed. */
    //esp_cpu_clear_watchpoint(1);
    //esp_cpu_set_watchpoint(1, s_coredump_stack, 1, ESP_WATCHPOINT_STORE);

    /* Replace the stack pointer depending on the architecture, but save the
     * current stack pointer in order to be able to restore it later.
     * This function must be inlined. */
    s_core_dump_backup = esp_core_dump_replace_sp(s_core_dump_sp);
    ESP_COREDUMP_LOGI("Backing up stack @ %p and using core dump stack @ %p",
                      s_core_dump_backup, esp_cpu_get_sp());
}

/**
 * @brief Calculate how many bytes are free on the stack set up earlier.
 *
 * @return Size, in bytes, of the available space on the stack.
 */
FORCE_INLINE_ATTR uint32_t esp_core_dump_free_stack_space(const uint8_t *pucStackByte)
{
    uint32_t ulCount = 0U;
    while ( ulCount < ESP_COREDUMP_STACK_SIZE &&
            *pucStackByte == (uint8_t)COREDUMP_STACK_FILL_BYTE )
    {
        pucStackByte -= portSTACK_GROWTH;
        ulCount++;
    }
    ulCount /= sizeof(uint8_t);
    return ulCount;
}

/**
 * @brief Print how many bytes have been used on the stack to create the core
 * dump.
 */
FORCE_INLINE_ATTR void esp_core_dump_report_stack_usage(void)
{
    uint32_t bytes_free = esp_core_dump_free_stack_space(s_coredump_stack);
    ESP_COREDUMP_LOGI("Core dump used %u bytes on stack. %u bytes left free.",
                      s_core_dump_sp - s_coredump_stack - bytes_free, bytes_free);

    /* Restore the stack pointer. */
    ESP_COREDUMP_LOGI("Restoring stack @ %p", s_core_dump_backup);
    esp_core_dump_replace_sp(s_core_dump_backup);
}

#else
FORCE_INLINE_ATTR void esp_core_dump_setup_stack(void)
{
    /* If we are in an ISR, set a watchpoint at the end of the ISR stack */
    if (esp_core_dump_in_isr_context()) {
        uint8_t* topStack = esp_core_dump_get_isr_stack_top();
        esp_cpu_clear_watchpoint(1);
        esp_cpu_set_watchpoint(1, topStack + xPortGetCoreID() * configISR_STACK_SIZE, 1, ESP_WATCHPOINT_STORE);
    } else {
        /* For tasks, the user should enable stack overflow detection in menuconfig.
           TODO: if not enabled in menuconfig, enable it ourselves */
    }
}

FORCE_INLINE_ATTR void esp_core_dump_report_stack_usage(void)
{
}
#endif // CONFIG_ESP_COREDUMP_STACK_SIZE > 0

/* Exception frame of the crashed context, saved when core dump generation starts. */
static void* s_exc_frame = NULL;

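/**
 * @brief Common entry point for core dump generation: sets up the core dump
 * stack, initializes the port layer, then writes the dump in the configured
 * format (binary or ELF) through the given write configuration.
 */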
inline void esp_core_dump_write(panic_info_t *info, core_dump_write_config_t *write_cfg)
{
#ifndef CONFIG_ESP_ENABLE_COREDUMP_TO_NONE
    esp_err_t err = ESP_ERR_NOT_SUPPORTED;
    s_exc_frame = (void*) info->frame;

    esp_core_dump_setup_stack();
    esp_core_dump_port_init(info);
#if CONFIG_ESP_COREDUMP_DATA_FORMAT_BIN
    err = esp_core_dump_write_binary(write_cfg);
#elif CONFIG_ESP_COREDUMP_DATA_FORMAT_ELF
    err = esp_core_dump_write_elf(write_cfg);
#endif
    if (err != ESP_OK) {
        ESP_COREDUMP_LOGE("Core dump write failed with error=%d", err);
    }
    esp_core_dump_report_stack_usage();
#endif
}

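/**
 * @brief Default core dump initializer, declared weak so that a destination
 * specific implementation can override it.
 */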
void __attribute__((weak)) esp_core_dump_init(void)
{
    /* do nothing by default */
}

/**
 * Common functions related to core dump generation.
 */
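/**
 * @brief Save the interrupted task stack into `stack` (if provided), then make
 * the task header point to the ISR stack that was active when the panic
 * occurred.
 */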
static void esp_core_dump_switch_task_stack_to_isr(core_dump_task_header_t *task,
                                                   core_dump_mem_seg_header_t *stack)
{
    if (stack != NULL) {
        stack->start = task->stack_start;
        stack->size = esp_core_dump_get_memory_len(task->stack_start, task->stack_end);
    }
    task->stack_start = (uint32_t) s_exc_frame;
    task->stack_end = esp_core_dump_get_isr_stack_end();
    ESP_COREDUMP_LOG_PROCESS("Switched task %x to ISR stack [%x...%x]", task->tcb_addr,
                             task->stack_start,
                             task->stack_end);
}

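/**
 * @brief Reset the internal state used while iterating over task snapshots.
 */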
inline void esp_core_dump_reset_tasks_snapshots_iter(void)
{
    esp_core_dump_reset_fake_stacks();
}

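/**
 * @brief Return the handle of the task that follows `handle` in the task
 * snapshot iteration.
 */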
inline void *esp_core_dump_get_next_task(void *handle)
{
    return pxTaskGetNext(handle);
}

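/**
 * @brief Fill `task` with the snapshot of the given task and, when the crash
 * happened inside an ISR, save the interrupted task stack into
 * `interrupted_stack`.
 *
 * @return true if the task TCB and stack look sane, false otherwise.
 */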
bool esp_core_dump_get_task_snapshot(void *handle, core_dump_task_header_t *task,
                                     core_dump_mem_seg_header_t *interrupted_stack)
{
    TaskSnapshot_t rtos_snapshot = { 0 };

    if (interrupted_stack != NULL) {
        interrupted_stack->size = 0;
    }

    vTaskGetSnapshot(handle, &rtos_snapshot);
    task->tcb_addr = handle;
    task->stack_start = (uint32_t)rtos_snapshot.pxTopOfStack;
    task->stack_end = (uint32_t)rtos_snapshot.pxEndOfStack;

    if (!xPortInterruptedFromISRContext() && handle == esp_core_dump_get_current_task_handle()) {
        // Set correct stack top for current task; only modify if we came from the task,
        // and not an ISR that crashed.
        task->stack_start = (uint32_t) s_exc_frame;
    }
    if (!esp_core_dump_check_task(task)) {
        ESP_COREDUMP_LOG_PROCESS("Task %x is broken!", handle);
        return false;
    }
    if (handle == esp_core_dump_get_current_task_handle()) {
        ESP_COREDUMP_LOG_PROCESS("Crashed task %x", handle);
        esp_core_dump_port_set_crashed_tcb((uint32_t)handle);
        if (xPortInterruptedFromISRContext()) {
            esp_core_dump_switch_task_stack_to_isr(task, interrupted_stack);
        }
    }
    return true;
}

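/**
 * @brief Return the number of non-empty user RAM regions (DRAM, IRAM, RTC,
 * RTC fast) that will be added to the core dump.
 */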
uint32_t esp_core_dump_get_user_ram_segments(void)
{
    uint32_t total_sz = 0;

    // count number of memory segments to insert into ELF structure
    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_dram_end, &_coredump_dram_start) > 0 ? 1 : 0;
    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_rtc_end, &_coredump_rtc_start) > 0 ? 1 : 0;
    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_rtc_fast_end, &_coredump_rtc_fast_start) > 0 ? 1 : 0;
    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_iram_end, &_coredump_iram_start) > 0 ? 1 : 0;

    return total_sz;
}

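/**
 * @brief Return the total size of the user RAM regions to be dumped.
 */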
uint32_t esp_core_dump_get_user_ram_size(void)
{
    uint32_t total_sz = 0;

    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_dram_end, &_coredump_dram_start);
    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_rtc_end, &_coredump_rtc_start);
    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_rtc_fast_end, &_coredump_rtc_fast_start);
    total_sz += COREDUMP_GET_MEMORY_SIZE(&_coredump_iram_end, &_coredump_iram_start);

    return total_sz;
}

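/**
 * @brief Get the start address and size, in bytes, of the given user RAM
 * region.
 *
 * @return Size of the region in bytes, or -1 if the region is unknown.
 *
 * Illustrative usage (hypothetical caller, not part of this file):
 * @code
 * uint32_t start = 0;
 * int size = esp_core_dump_get_user_ram_info(COREDUMP_MEMORY_DRAM, &start);
 * if (size > 0) {
 *     // the range [start, start + size) can be appended to the dump
 * }
 * @endcode
 */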
int esp_core_dump_get_user_ram_info(coredump_region_t region, uint32_t *start)
{
    int total_sz = -1;

    ESP_COREDUMP_DEBUG_ASSERT(start != NULL);

    switch (region) {
        case COREDUMP_MEMORY_DRAM:
            *start = (uint32_t)&_coredump_dram_start;
            total_sz = (uint8_t *)&_coredump_dram_end - (uint8_t *)&_coredump_dram_start;
            break;

        case COREDUMP_MEMORY_IRAM:
            *start = (uint32_t)&_coredump_iram_start;
            total_sz = (uint8_t *)&_coredump_iram_end - (uint8_t *)&_coredump_iram_start;
            break;

        case COREDUMP_MEMORY_RTC:
            *start = (uint32_t)&_coredump_rtc_start;
            total_sz = (uint8_t *)&_coredump_rtc_end - (uint8_t *)&_coredump_rtc_start;
            break;

        case COREDUMP_MEMORY_RTC_FAST:
            *start = (uint32_t)&_coredump_rtc_fast_start;
            total_sz = (uint8_t *)&_coredump_rtc_fast_end - (uint8_t *)&_coredump_rtc_fast_start;
            break;

        default:
            break;
    }

    return total_sz;
}

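/**
 * @brief Check whether `addr` points to a sane memory segment large enough to
 * hold a TCB.
 */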
inline bool esp_core_dump_tcb_addr_is_sane(uint32_t addr)
{
    return esp_core_dump_mem_seg_is_sane(addr, esp_core_dump_get_tcb_len());
}

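/**
 * @brief Return true if the CPU was interrupted from an ISR context.
 */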
inline bool esp_core_dump_in_isr_context(void)
{
    return xPortInterruptedFromISRContext();
}

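/**
 * @brief Return the handle of the task that was running on the current core.
 */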
inline core_dump_task_handle_t esp_core_dump_get_current_task_handle()
{
    return (core_dump_task_handle_t) xTaskGetCurrentTaskHandleForCPU(xPortGetCoreID());
}

#endif // CONFIG_ESP_COREDUMP_ENABLE