/*
 * Copyright Meta Platforms, Inc. and its affiliates.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/debug/coredump.h>
#include <zephyr/drivers/coredump.h>

#define DT_DRV_COMPAT zephyr_coredump

enum COREDUMP_TYPE {
	COREDUMP_TYPE_MEMCPY = 0,
	COREDUMP_TYPE_CALLBACK = 1,
};

struct coredump_config {
	/* Type of coredump device */
	enum COREDUMP_TYPE type;

	/* Length of memory_regions array */
	int length;

	/* Memory regions specified in device tree */
	size_t memory_regions[];
};
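
/*
 * Illustrative sketch (not part of the driver): for a COREDUMP_TYPE_MEMCPY
 * device the memory_regions array is a flattened list of (start address,
 * size) pairs taken from the memory-regions devicetree property. Assuming
 * the zephyr,coredump binding, a node covering two regions could look like:
 *
 *     coredump0: coredump {
 *             compatible = "zephyr,coredump";
 *             coredump-type = "COREDUMP_TYPE_MEMCPY";
 *             memory-regions = <0x20000000 0x100 0x20010000 0x200>;
 *     };
 */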

struct coredump_data {
	/* Memory regions registered at run time */
	sys_slist_t region_list;

	/* Callback to be invoked at time of dump */
	coredump_dump_callback_t dump_callback;
};

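/*
 * Implementation of the driver's dump operation. For a callback type device
 * the registered callback is given the statically allocated buffer to fill
 * before that buffer is dumped; for a memcpy type device every region from
 * the device tree and every region registered at run time is copied into
 * the core dump.
 */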
static void coredump_impl_dump(const struct device *dev)
{
	const struct coredump_config *config = dev->config;
	struct coredump_data *data = dev->data;

	if (config->type == COREDUMP_TYPE_CALLBACK) {
		if (data->dump_callback) {
			uintptr_t start_address = config->memory_regions[0];
			size_t size = config->memory_regions[1];

			/* Invoke callback to allow consumer to fill array with desired data */
			data->dump_callback(start_address, size);
			coredump_memory_dump(start_address, start_address + size);
		}
	} else { /* COREDUMP_TYPE_MEMCPY */
		/*
		 * Add each memory region specified in device tree to the core dump;
		 * the memory_regions array should contain two entries per region,
		 * holding the start address and size.
		 */
		if ((config->length > 0) && ((config->length % 2) == 0)) {
			for (int i = 0; i < config->length; i += 2) {
				uintptr_t start_address = config->memory_regions[i];
				size_t size = config->memory_regions[i + 1];

				coredump_memory_dump(start_address, start_address + size);
			}
		}

		sys_snode_t *node;

		/* Add each memory region registered at runtime to the core dump */
		SYS_SLIST_FOR_EACH_NODE(&data->region_list, node) {
			struct coredump_mem_region_node *region;

			region = CONTAINER_OF(node, struct coredump_mem_region_node, node);
			coredump_memory_dump(region->start, region->start + region->size);
		}
	}
}

static bool coredump_impl_register_memory(const struct device *dev,
					   struct coredump_mem_region_node *region)
{
	const struct coredump_config *config = dev->config;

	if (config->type == COREDUMP_TYPE_CALLBACK) {
		return false;
	}

	struct coredump_data *data = dev->data;

	sys_slist_append(&data->region_list, &region->node);
	return true;
}
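
/*
 * Illustrative sketch (not part of the driver): registering a region at run
 * time with a COREDUMP_TYPE_MEMCPY device. This assumes the
 * coredump_device_register_memory() wrapper from <zephyr/drivers/coredump.h>;
 * only the pointer is stored, so the node and the memory it describes must
 * remain valid while registered.
 *
 *     static uint32_t my_state[16];
 *     static struct coredump_mem_region_node region = {
 *             .start = (uintptr_t)my_state,
 *             .size = sizeof(my_state),
 *     };
 *
 *     coredump_device_register_memory(
 *             DEVICE_DT_GET(DT_NODELABEL(coredump0)), &region);
 */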

static bool coredump_impl_unregister_memory(const struct device *dev,
					     struct coredump_mem_region_node *region)
{
	const struct coredump_config *config = dev->config;

	if (config->type == COREDUMP_TYPE_CALLBACK) {
		return false;
	}

	struct coredump_data *data = dev->data;

	return sys_slist_find_and_remove(&data->region_list, &region->node);
}

static bool coredump_impl_register_callback(const struct device *dev,
					     coredump_dump_callback_t callback)
{
	const struct coredump_config *config = dev->config;

	if (config->type == COREDUMP_TYPE_MEMCPY) {
		return false;
	}

	struct coredump_data *data = dev->data;

	data->dump_callback = callback;
	return true;
}
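
/*
 * Illustrative sketch (not part of the driver): a consumer of a
 * COREDUMP_TYPE_CALLBACK device supplies a callback that is handed the
 * address and size of the statically allocated dump buffer right before it
 * is written out. The coredump_device_register_callback() wrapper name is an
 * assumption; the operation itself is the register_callback entry in
 * coredump_api below. my_state stands in for whatever consumer state should
 * be captured.
 *
 *     static void fill_dump_area(uintptr_t dump_area, size_t dump_area_size)
 *     {
 *             memcpy((void *)dump_area, my_state,
 *                    MIN(sizeof(my_state), dump_area_size));
 *     }
 *
 *     coredump_device_register_callback(
 *             DEVICE_DT_GET(DT_NODELABEL(coredump_cb)), fill_dump_area);
 */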

static int coredump_init(const struct device *dev)
{
	struct coredump_data *data = dev->data;

	sys_slist_init(&data->region_list);
	return 0;
}

static const struct coredump_driver_api coredump_api = {
	.dump = coredump_impl_dump,
	.register_memory = coredump_impl_register_memory,
	.unregister_memory = coredump_impl_unregister_memory,
	.register_callback = coredump_impl_register_callback,
};

#define INIT_REGION(node_id, prop, idx) DT_PROP_BY_IDX(node_id, prop, idx),
#define DT_INST_COREDUMP_IF_TYPE_CALLBACK(n, a, b) \
	COND_CODE_1(DT_INST_ENUM_IDX(n, coredump_type), a, b)

#define CREATE_COREDUMP_DEVICE(n) \
	/* Statically allocate the desired memory for the callback type device */ \
	DT_INST_COREDUMP_IF_TYPE_CALLBACK(n, \
	( \
		BUILD_ASSERT(DT_INST_PROP_LEN(n, memory_regions) == 2, \
			"Expect exactly one entry (address and size) in memory_regions"); \
		BUILD_ASSERT(DT_INST_PROP_BY_IDX(n, memory_regions, 0) == 0, \
			"Verify address is set to 0"); \
		static uint8_t coredump_bytes[DT_INST_PROP_BY_IDX(n, memory_regions, 1)] \
			__aligned(4); \
	), ()) \
	static struct coredump_data coredump_data_##n; \
	static const struct coredump_config coredump_config##n = { \
		.type = DT_INST_STRING_TOKEN_OR(n, coredump_type, COREDUMP_TYPE_MEMCPY), \
		COND_CODE_1(DT_INST_NODE_HAS_PROP(n, memory_regions), \
		( \
			.length = DT_INST_PROP_LEN(n, memory_regions), \
			DT_INST_COREDUMP_IF_TYPE_CALLBACK(n, \
			( \
				/* Callback type device has one entry in memory_regions array */ \
				.memory_regions = { \
					(size_t)&coredump_bytes[0], \
					DT_INST_PROP_BY_IDX(n, memory_regions, 1), \
				}, \
			), \
			( \
				.memory_regions = { \
					DT_INST_FOREACH_PROP_ELEM(n, memory_regions, INIT_REGION) \
				}, \
			)) \
		), \
		( \
			.length = 0, \
		)) \
	}; \
	DEVICE_DT_INST_DEFINE(n, \
		coredump_init, \
		NULL, \
		&coredump_data_##n, \
		&coredump_config##n, \
		PRE_KERNEL_1, \
		CONFIG_COREDUMP_DEVICE_INIT_PRIORITY, \
		&coredump_api);

DT_INST_FOREACH_STATUS_OKAY(CREATE_COREDUMP_DEVICE)
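
/*
 * Illustrative sketch (not part of the driver): assuming the zephyr,coredump
 * binding, a COREDUMP_TYPE_CALLBACK node provides a single memory-regions
 * entry whose address must be 0 (enforced by the BUILD_ASSERT above) and
 * whose size sets the length of the statically allocated coredump_bytes
 * buffer:
 *
 *     coredump_cb: coredump {
 *             compatible = "zephyr,coredump";
 *             coredump-type = "COREDUMP_TYPE_CALLBACK";
 *             memory-regions = <0x0 0x200>;
 *     };
 */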