// SPDX-License-Identifier: GPL-2.0-only
/*
 * Coredump functionality for Remoteproc framework.
 *
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/remoteproc.h>
#include "remoteproc_internal.h"
#include "remoteproc_elf_helpers.h"

struct rproc_coredump_state {
	struct rproc *rproc;
	void *header;
	struct completion dump_done;
};

/**
 * rproc_coredump_cleanup() - clean up dump_segments list
 * @rproc: the remote processor handle
 */
void rproc_coredump_cleanup(struct rproc *rproc)
{
	struct rproc_dump_segment *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
		list_del(&entry->node);
		kfree(entry);
	}
}

/**
 * rproc_coredump_add_segment() - add segment of device memory to coredump
 * @rproc:	handle of a remote processor
 * @da:		device address
 * @size:	size of segment
 *
 * Add device memory to the list of segments to be included in a coredump for
 * the remoteproc.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
{
	struct rproc_dump_segment *segment;

	segment = kzalloc(sizeof(*segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	segment->da = da;
	segment->size = size;

	list_add_tail(&segment->node, &rproc->dump_segments);

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_segment);
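
/*
 * Usage sketch (not part of this file's API; names are hypothetical): a
 * platform driver would typically register its dump segments from an
 * rproc_ops callback such as ->parse_fw(), e.g.:
 *
 *	static int my_rproc_parse_fw(struct rproc *rproc,
 *				     const struct firmware *fw)
 *	{
 *		int ret;
 *
 *		ret = rproc_elf_load_rsc_table(rproc, fw);
 *		if (ret)
 *			return ret;
 *
 *		return rproc_coredump_add_segment(rproc, MY_CARVEOUT_DA,
 *						  MY_CARVEOUT_SIZE);
 *	}
 *
 * where MY_CARVEOUT_DA and MY_CARVEOUT_SIZE describe a driver-owned region
 * of device memory.
 */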

/**
 * rproc_coredump_add_custom_segment() - add custom coredump segment
 * @rproc:	handle of a remote processor
 * @da:		device address
 * @size:	size of segment
 * @dumpfn:	custom dump function called for each segment during coredump
 * @priv:	private data
 *
 * Add device memory to the list of segments to be included in the coredump
 * and associate the segment with the given custom dump function and private
 * data.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_add_custom_segment(struct rproc *rproc,
				      dma_addr_t da, size_t size,
				      void (*dumpfn)(struct rproc *rproc,
						     struct rproc_dump_segment *segment,
						     void *dest, size_t offset,
						     size_t size),
				      void *priv)
{
	struct rproc_dump_segment *segment;

	segment = kzalloc(sizeof(*segment), GFP_KERNEL);
	if (!segment)
		return -ENOMEM;

	segment->da = da;
	segment->size = size;
	segment->priv = priv;
	segment->dump = dumpfn;

	list_add_tail(&segment->node, &rproc->dump_segments);

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
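
/*
 * Usage sketch (hypothetical driver code): the custom dump callback lets a
 * driver fetch segment contents through its own mapping instead of the
 * default rproc_da_to_va() path.  Here @priv is assumed to carry a
 * driver-provided __iomem base for the region:
 *
 *	static void my_segment_dump(struct rproc *rproc,
 *				    struct rproc_dump_segment *segment,
 *				    void *dest, size_t offset, size_t size)
 *	{
 *		void __iomem *base = segment->priv;
 *
 *		memcpy_fromio(dest, base + offset, size);
 *	}
 *
 *	ret = rproc_coredump_add_custom_segment(rproc, da, size,
 *						my_segment_dump, my_base);
 *
 * Note that rproc_coredump_using_sections() reuses @priv as the section
 * name string, so drivers using that path pass a name here instead.
 */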

/**
 * rproc_coredump_set_elf_info() - set coredump elf information
 * @rproc:	handle of a remote processor
 * @class:	elf class for coredump elf file
 * @machine:	elf machine for coredump elf file
 *
 * Set the elf information which will be used for the coredump elf file.
 *
 * Return: 0 on success, negative errno on error.
 */
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
{
	if (class != ELFCLASS64 && class != ELFCLASS32)
		return -EINVAL;

	rproc->elf_class = class;
	rproc->elf_machine = machine;

	return 0;
}
EXPORT_SYMBOL(rproc_coredump_set_elf_info);
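
/*
 * Usage sketch: drivers normally pick the elf class/machine once at probe
 * time, right after rproc_alloc().  For example, a 32-bit core with no
 * specific machine type might use:
 *
 *	ret = rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
 *	if (ret)
 *		return ret;
 */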

static void rproc_coredump_free(void *data)
{
	struct rproc_coredump_state *dump_state = data;

	vfree(dump_state->header);
	complete(&dump_state->dump_done);
}

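/*
 * Map a devcoredump read offset (relative to the end of the ELF headers)
 * onto the list of registered segments.  The segments are laid out back to
 * back in registration order.  For example, with two segments of sizes
 * 0x100 and 0x200 bytes, a user_offset of 0x150 skips past the first
 * segment, lands 0x50 bytes into the second one, and reports 0x1b0 bytes
 * of data left in it.
 */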
static void *rproc_coredump_find_segment(loff_t user_offset,
					 struct list_head *segments,
					 size_t *data_left)
{
	struct rproc_dump_segment *segment;

	list_for_each_entry(segment, segments, node) {
		if (user_offset < segment->size) {
			*data_left = segment->size - user_offset;
			return segment;
		}
		user_offset -= segment->size;
	}

	*data_left = 0;
	return NULL;
}

static void rproc_copy_segment(struct rproc *rproc, void *dest,
			       struct rproc_dump_segment *segment,
			       size_t offset, size_t size)
{
	void *ptr;
	bool is_iomem;

	if (segment->dump) {
		segment->dump(rproc, segment, dest, offset, size);
	} else {
		ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem);
		if (!ptr) {
			dev_err(&rproc->dev,
				"invalid copy request for segment %pad with offset %zu and size %zu\n",
				&segment->da, offset, size);
			memset(dest, 0xff, size);
		} else {
			if (is_iomem)
				memcpy_fromio(dest, ptr, size);
			else
				memcpy(dest, ptr, size);
		}
	}
}

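/*
 * devcoredump read callback used for inline dumps.  The first header_sz
 * bytes of the virtual core file come from the vmalloc'ed ELF header
 * buffer; offsets beyond that are served by copying segment data straight
 * from device memory, so only the headers need to stay resident while
 * userspace reads the dump.
 */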
static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
				   void *data, size_t header_sz)
{
	size_t seg_data, bytes_left = count;
	ssize_t copy_sz;
	struct rproc_dump_segment *seg;
	struct rproc_coredump_state *dump_state = data;
	struct rproc *rproc = dump_state->rproc;
	void *elfcore = dump_state->header;

	/* Copy the vmalloc'ed header first. */
	if (offset < header_sz) {
		copy_sz = memory_read_from_buffer(buffer, count, &offset,
						  elfcore, header_sz);

		return copy_sz;
	}

	/*
	 * Find out the segment memory chunk to be copied based on offset.
	 * Keep copying data until count bytes are read.
	 */
	while (bytes_left) {
		seg = rproc_coredump_find_segment(offset - header_sz,
						  &rproc->dump_segments,
						  &seg_data);
		/* EOF check */
		if (!seg) {
			dev_info(&rproc->dev, "Ramdump done, %lld bytes read\n",
				 offset);
			break;
		}

		copy_sz = min_t(size_t, bytes_left, seg_data);

		rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
				   copy_sz);

		offset += copy_sz;
		buffer += copy_sz;
		bytes_left -= copy_sz;
	}

	return count - bytes_left;
}

/**
 * rproc_coredump() - perform coredump
 * @rproc:	rproc handle
 *
 * This function will generate an ELF header for the registered segments
 * and create a devcoredump device associated with @rproc. Depending on
 * the coredump configuration, the segments are either copied from device
 * memory to userspace on demand (inline dump) or copied up front into a
 * separate buffer, which userspace then reads. The inline approach avoids
 * using extra vmalloc memory, but it stalls the recovery flow until the
 * dump has been read by userspace.
 */
void rproc_coredump(struct rproc *rproc)
{
	struct rproc_dump_segment *segment;
	void *phdr;
	void *ehdr;
	size_t data_size;
	size_t offset;
	void *data;
	u8 class = rproc->elf_class;
	int phnum = 0;
	struct rproc_coredump_state dump_state;
	enum rproc_dump_mechanism dump_conf = rproc->dump_conf;

	if (list_empty(&rproc->dump_segments) ||
	    dump_conf == RPROC_COREDUMP_DISABLED)
		return;

	if (class == ELFCLASSNONE) {
		dev_err(&rproc->dev, "Elf class is not set\n");
		return;
	}

	data_size = elf_size_of_hdr(class);
	list_for_each_entry(segment, &rproc->dump_segments, node) {
258 		/*
259 		 * For default configuration buffer includes headers & segments.
260 		 * For inline dump buffer just includes headers as segments are
261 		 * directly read from device memory.
262 		 */
		data_size += elf_size_of_phdr(class);
		if (dump_conf == RPROC_COREDUMP_ENABLED)
			data_size += segment->size;

		phnum++;
	}

	data = vmalloc(data_size);
	if (!data)
		return;

	ehdr = data;

	memset(ehdr, 0, elf_size_of_hdr(class));
	/* e_ident field is common for both elf32 and elf64 */
	elf_hdr_init_ident(ehdr, class);

	elf_hdr_set_e_type(class, ehdr, ET_CORE);
	elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
	elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
	elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
	elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
	elf_hdr_set_e_phnum(class, ehdr, phnum);

	phdr = data + elf_hdr_get_e_phoff(class, ehdr);
	offset = elf_hdr_get_e_phoff(class, ehdr);
	offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);

	list_for_each_entry(segment, &rproc->dump_segments, node) {
		memset(phdr, 0, elf_size_of_phdr(class));
		elf_phdr_set_p_type(class, phdr, PT_LOAD);
		elf_phdr_set_p_offset(class, phdr, offset);
		elf_phdr_set_p_vaddr(class, phdr, segment->da);
		elf_phdr_set_p_paddr(class, phdr, segment->da);
		elf_phdr_set_p_filesz(class, phdr, segment->size);
		elf_phdr_set_p_memsz(class, phdr, segment->size);
		elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
		elf_phdr_set_p_align(class, phdr, 0);

		if (dump_conf == RPROC_COREDUMP_ENABLED)
			rproc_copy_segment(rproc, data + offset, segment, 0,
					   segment->size);

		offset += elf_phdr_get_p_filesz(class, phdr);
		phdr += elf_size_of_phdr(class);
	}
	if (dump_conf == RPROC_COREDUMP_ENABLED) {
		dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
		return;
	}

	/* Initialize the dump state struct to be used by rproc_coredump_read */
	dump_state.rproc = rproc;
	dump_state.header = data;
	init_completion(&dump_state.dump_done);

	dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
		      rproc_coredump_read, rproc_coredump_free);

	/*
	 * Wait until the dump is read and free is called. Data is freed
	 * by devcoredump framework automatically after 5 minutes.
	 */
	wait_for_completion(&dump_state.dump_done);
}
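
/*
 * Note: drivers do not normally call rproc_coredump() directly; the
 * remoteproc core uses it as the default rproc_ops ->coredump() handler
 * when a driver leaves that callback unset, and invokes it from the
 * recovery path.  Which mode runs (disabled, default or inline) follows
 * rproc->dump_conf, typically selected through the remoteproc debugfs
 * "coredump" entry.
 */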

/**
 * rproc_coredump_using_sections() - perform coredump using section headers
 * @rproc:	rproc handle
 *
 * This function will generate an ELF header with section headers for the
 * registered segments and create a devcoredump device associated with
 * @rproc. Depending on the coredump configuration, the segments are either
 * copied from device memory to userspace on demand (inline dump) or copied
 * up front into a separate buffer, which userspace then reads. The inline
 * approach avoids using extra vmalloc memory, but it stalls the recovery
 * flow until the dump has been read by userspace.
 */
void rproc_coredump_using_sections(struct rproc *rproc)
{
	struct rproc_dump_segment *segment;
	void *shdr;
	void *ehdr;
	size_t data_size;
	size_t strtbl_size = 0;
	size_t strtbl_index = 1;
	size_t offset;
	void *data;
	u8 class = rproc->elf_class;
	int shnum;
	struct rproc_coredump_state dump_state;
	enum rproc_dump_mechanism dump_conf = rproc->dump_conf;
	char *str_tbl = "STR_TBL";

	if (list_empty(&rproc->dump_segments) ||
	    dump_conf == RPROC_COREDUMP_DISABLED)
		return;

	if (class == ELFCLASSNONE) {
		dev_err(&rproc->dev, "Elf class is not set\n");
		return;
	}

	/*
	 * We allocate two extra section headers: the first one is the null
	 * section header and the second one is for the string table. Space
	 * for the string table itself is added to data_size below.
	 */
	data_size = elf_size_of_hdr(class) + 2 * elf_size_of_shdr(class);
	shnum = 2;

	/* one extra byte for the NUL terminator, one for the null byte at index 0 */
	strtbl_size += strlen(str_tbl) + 2;

	list_for_each_entry(segment, &rproc->dump_segments, node) {
		data_size += elf_size_of_shdr(class);
		strtbl_size += strlen(segment->priv) + 1;
		if (dump_conf == RPROC_COREDUMP_ENABLED)
			data_size += segment->size;
		shnum++;
	}

	data_size += strtbl_size;

	data = vmalloc(data_size);
	if (!data)
		return;

	ehdr = data;
	memset(ehdr, 0, elf_size_of_hdr(class));
	/* e_ident field is common for both elf32 and elf64 */
	elf_hdr_init_ident(ehdr, class);

	elf_hdr_set_e_type(class, ehdr, ET_CORE);
	elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
	elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
	elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
	elf_hdr_set_e_shoff(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
	elf_hdr_set_e_shentsize(class, ehdr, elf_size_of_shdr(class));
	elf_hdr_set_e_shnum(class, ehdr, shnum);
	elf_hdr_set_e_shstrndx(class, ehdr, 1);

408 	/*
409 	 * The zeroth index of the section header is reserved and is rarely used.
410 	 * Set the section header as null (SHN_UNDEF) and move to the next one.
411 	 */
	shdr = data + elf_hdr_get_e_shoff(class, ehdr);
	memset(shdr, 0, elf_size_of_shdr(class));
	shdr += elf_size_of_shdr(class);

	/* Initialize the string table. */
	offset = elf_hdr_get_e_shoff(class, ehdr) +
		 elf_size_of_shdr(class) * elf_hdr_get_e_shnum(class, ehdr);
	memset(data + offset, 0, strtbl_size);

	/* Fill in the string table section header. */
	memset(shdr, 0, elf_size_of_shdr(class));
	elf_shdr_set_sh_type(class, shdr, SHT_STRTAB);
	elf_shdr_set_sh_offset(class, shdr, offset);
	elf_shdr_set_sh_size(class, shdr, strtbl_size);
	elf_shdr_set_sh_entsize(class, shdr, 0);
	elf_shdr_set_sh_flags(class, shdr, 0);
	elf_shdr_set_sh_name(class, shdr, elf_strtbl_add(str_tbl, ehdr, class, &strtbl_index));
	offset += elf_shdr_get_sh_size(class, shdr);
	shdr += elf_size_of_shdr(class);

	list_for_each_entry(segment, &rproc->dump_segments, node) {
		memset(shdr, 0, elf_size_of_shdr(class));
		elf_shdr_set_sh_type(class, shdr, SHT_PROGBITS);
		elf_shdr_set_sh_offset(class, shdr, offset);
		elf_shdr_set_sh_addr(class, shdr, segment->da);
		elf_shdr_set_sh_size(class, shdr, segment->size);
		elf_shdr_set_sh_entsize(class, shdr, 0);
		elf_shdr_set_sh_flags(class, shdr, SHF_WRITE);
		elf_shdr_set_sh_name(class, shdr,
				     elf_strtbl_add(segment->priv, ehdr, class, &strtbl_index));

		/* No need to copy segments for inline dumps */
		if (dump_conf == RPROC_COREDUMP_ENABLED)
			rproc_copy_segment(rproc, data + offset, segment, 0,
					   segment->size);
		offset += elf_shdr_get_sh_size(class, shdr);
		shdr += elf_size_of_shdr(class);
	}

	if (dump_conf == RPROC_COREDUMP_ENABLED) {
		dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
		return;
	}

	/* Initialize the dump state struct to be used by rproc_coredump_read */
	dump_state.rproc = rproc;
	dump_state.header = data;
	init_completion(&dump_state.dump_done);

	dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
		      rproc_coredump_read, rproc_coredump_free);

	/*
	 * Wait until the dump is read and free is called. Data is freed
	 * by devcoredump framework automatically after 5 minutes.
	 */
	wait_for_completion(&dump_state.dump_done);
}
EXPORT_SYMBOL(rproc_coredump_using_sections);
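
/*
 * Usage sketch (hypothetical driver; sketch only): a driver that prefers
 * section-based dumps overrides the default ->coredump() handler in its
 * rproc_ops:
 *
 *	static const struct rproc_ops my_rproc_ops = {
 *		.start = my_rproc_start,
 *		.stop = my_rproc_stop,
 *		.coredump = rproc_coredump_using_sections,
 *	};
 *
 * With this path, segment->priv is used as the section name (see the
 * strlen()/elf_strtbl_add() calls above), so each segment's private data
 * must point at a NUL-terminated name string, for example by registering
 * it through rproc_coredump_add_custom_segment().
 */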