// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string.h>
#include <stdio.h>
#include <sdkconfig.h>

#define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
#include "esp_heap_trace.h"
#undef HEAP_TRACE_SRCFILE

#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"


#define STACK_DEPTH CONFIG_HEAP_TRACING_STACK_DEPTH
27
#if CONFIG_HEAP_TRACING_STANDALONE

/* Spinlock protecting all of the trace state below
   (record buffer, counters and the 'tracing' flag). */
static portMUX_TYPE trace_mux = portMUX_INITIALIZER_UNLOCKED;

/* True while tracing is enabled (toggled via heap_trace_start/stop/resume). */
static bool tracing;

/* Trace mode selected by the most recent heap_trace_start() call. */
static heap_trace_mode_t mode;

/* Buffer used for records, starting at offset 0.
   Supplied by the caller via heap_trace_init_standalone().
*/
static heap_trace_record_t *buffer;

/* Capacity of 'buffer', in records. */
static size_t total_records;

/* Count of entries logged in the buffer.

   Maximum total_records
*/
static size_t count;

/* Actual number of allocations logged */
static size_t total_allocations;

/* Actual number of frees logged */
static size_t total_frees;

/* Has the buffer overflowed and lost trace entries? */
static bool has_overflowed = false;
53
heap_trace_init_standalone(heap_trace_record_t * record_buffer,size_t num_records)54 esp_err_t heap_trace_init_standalone(heap_trace_record_t *record_buffer, size_t num_records)
55 {
56 if (tracing) {
57 return ESP_ERR_INVALID_STATE;
58 }
59 buffer = record_buffer;
60 total_records = num_records;
61 memset(buffer, 0, num_records * sizeof(heap_trace_record_t));
62 return ESP_OK;
63 }
64
heap_trace_start(heap_trace_mode_t mode_param)65 esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
66 {
67 if (buffer == NULL || total_records == 0) {
68 return ESP_ERR_INVALID_STATE;
69 }
70
71 portENTER_CRITICAL(&trace_mux);
72
73 tracing = false;
74 mode = mode_param;
75 count = 0;
76 total_allocations = 0;
77 total_frees = 0;
78 has_overflowed = false;
79 heap_trace_resume();
80
81 portEXIT_CRITICAL(&trace_mux);
82 return ESP_OK;
83 }
84
set_tracing(bool enable)85 static esp_err_t set_tracing(bool enable)
86 {
87 if (tracing == enable) {
88 return ESP_ERR_INVALID_STATE;
89 }
90 tracing = enable;
91 return ESP_OK;
92 }
93
heap_trace_stop(void)94 esp_err_t heap_trace_stop(void)
95 {
96 return set_tracing(false);
97 }
98
heap_trace_resume(void)99 esp_err_t heap_trace_resume(void)
100 {
101 return set_tracing(true);
102 }
103
heap_trace_get_count(void)104 size_t heap_trace_get_count(void)
105 {
106 return count;
107 }
108
heap_trace_get(size_t index,heap_trace_record_t * record)109 esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record)
110 {
111 if (record == NULL) {
112 return ESP_ERR_INVALID_STATE;
113 }
114 esp_err_t result = ESP_OK;
115
116 portENTER_CRITICAL(&trace_mux);
117 if (index >= count) {
118 result = ESP_ERR_INVALID_ARG; /* out of range for 'count' */
119 } else {
120 memcpy(record, &buffer[index], sizeof(heap_trace_record_t));
121 }
122 portEXIT_CRITICAL(&trace_mux);
123 return result;
124 }
125
126
heap_trace_dump(void)127 void heap_trace_dump(void)
128 {
129 size_t delta_size = 0;
130 size_t delta_allocs = 0;
131 printf("%u allocations trace (%u entry buffer)\n",
132 count, total_records);
133 size_t start_count = count;
134 for (int i = 0; i < count; i++) {
135 heap_trace_record_t *rec = &buffer[i];
136
137 if (rec->address != NULL) {
138 printf("%d bytes (@ %p) allocated CPU %d ccount 0x%08x caller ",
139 rec->size, rec->address, rec->ccount & 1, rec->ccount & ~3);
140 for (int j = 0; j < STACK_DEPTH && rec->alloced_by[j] != 0; j++) {
141 printf("%p%s", rec->alloced_by[j],
142 (j < STACK_DEPTH - 1) ? ":" : "");
143 }
144
145 if (mode != HEAP_TRACE_ALL || STACK_DEPTH == 0 || rec->freed_by[0] == NULL) {
146 delta_size += rec->size;
147 delta_allocs++;
148 printf("\n");
149 } else {
150 printf("\nfreed by ");
151 for (int j = 0; j < STACK_DEPTH; j++) {
152 printf("%p%s", rec->freed_by[j],
153 (j < STACK_DEPTH - 1) ? ":" : "\n");
154 }
155 }
156 }
157 }
158 if (mode == HEAP_TRACE_ALL) {
159 printf("%u bytes alive in trace (%u/%u allocations)\n",
160 delta_size, delta_allocs, heap_trace_get_count());
161 } else {
162 printf("%u bytes 'leaked' in trace (%u allocations)\n", delta_size, delta_allocs);
163 }
164 printf("total allocations %u total frees %u\n", total_allocations, total_frees);
165 if (start_count != count) { // only a problem if trace isn't stopped before dumping
166 printf("(NB: New entries were traced while dumping, so trace dump may have duplicate entries.)\n");
167 }
168 if (has_overflowed) {
169 printf("(NB: Buffer has overflowed, so trace data is incomplete.)\n");
170 }
171 }
172
173 /* Add a new allocation to the heap trace records */
record_allocation(const heap_trace_record_t * record)174 static IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
175 {
176 if (!tracing || record->address == NULL) {
177 return;
178 }
179
180 portENTER_CRITICAL(&trace_mux);
181 if (tracing) {
182 if (count == total_records) {
183 has_overflowed = true;
184 /* Move the whole buffer back one slot.
185
186 This is a bit slow, compared to treating this buffer as a ringbuffer and rotating a head pointer.
187
188 However, ringbuffer code gets tricky when we remove elements in mid-buffer (for leak trace mode) while
189 trying to keep track of an item count that may overflow.
190 */
191 memmove(&buffer[0], &buffer[1], sizeof(heap_trace_record_t) * (total_records -1));
192 count--;
193 }
194 // Copy new record into place
195 memcpy(&buffer[count], record, sizeof(heap_trace_record_t));
196 count++;
197 total_allocations++;
198 }
199 portEXIT_CRITICAL(&trace_mux);
200 }
201
202 // remove a record, used when freeing
203 static void remove_record(int index);
204
205 /* record a free event in the heap trace log
206
207 For HEAP_TRACE_ALL, this means filling in the freed_by pointer.
208 For HEAP_TRACE_LEAKS, this means removing the record from the log.
209 */
record_free(void * p,void ** callers)210 static IRAM_ATTR void record_free(void *p, void **callers)
211 {
212 if (!tracing || p == NULL) {
213 return;
214 }
215
216 portENTER_CRITICAL(&trace_mux);
217 if (tracing && count > 0) {
218 total_frees++;
219 /* search backwards for the allocation record matching this free */
220 int i;
221 for (i = count - 1; i >= 0; i--) {
222 if (buffer[i].address == p) {
223 break;
224 }
225 }
226
227 if (i >= 0) {
228 if (mode == HEAP_TRACE_ALL) {
229 memcpy(buffer[i].freed_by, callers, sizeof(void *) * STACK_DEPTH);
230 } else { // HEAP_TRACE_LEAKS
231 // Leak trace mode, once an allocation is freed we remove it from the list
232 remove_record(i);
233 }
234 }
235 }
236 portEXIT_CRITICAL(&trace_mux);
237 }
238
239 /* remove the entry at 'index' from the ringbuffer of saved records */
remove_record(int index)240 static IRAM_ATTR void remove_record(int index)
241 {
242 if (index < count - 1) {
243 // Remove the buffer entry from the list
244 memmove(&buffer[index], &buffer[index+1],
245 sizeof(heap_trace_record_t) * (total_records - index - 1));
246 } else {
247 // For last element, just zero it out to avoid ambiguity
248 memset(&buffer[index], 0, sizeof(heap_trace_record_t));
249 }
250 count--;
251 }
252
253 #include "heap_trace.inc"
254
255 #endif /*CONFIG_HEAP_TRACING_STANDALONE*/
256