1 /*
2  * Copyright (c) 2023 Intel Corporation
3  * Copyright (c) 2024 Arduino SA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/sys/util.h>
9 #include <zephyr/llext/elf.h>
10 #include <zephyr/llext/loader.h>
11 #include <zephyr/llext/llext.h>
12 #include <zephyr/llext/llext_internal.h>
13 #include <zephyr/kernel.h>
14 
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);
17 
18 #include <string.h>
19 
20 #include "llext_priv.h"
21 
22 /*
23  * NOTICE: Functions in this file do not clean up allocations in their error
24  * paths; instead, this is performed once and for all when leaving the parent
25  * `do_llext_load()` function. This approach consolidates memory management
26  * in a single place, simplifying error handling and reducing the risk of
27  * memory leaks.
28  *
29  * The following rationale applies:
30  *
31  * - The input `struct llext` and fields in `struct loader` are zero-filled
32  *   at the beginning of the do_llext_load function, so that every pointer is
33  *   set to NULL and every bool is false.
34  * - If some function called by do_llext_load allocates memory, it does so by
35  *   immediately writing the pointer in the `ext` and `ldr` structures.
36  * - do_llext_load() will clean up the memory allocated by the functions it
37  *   calls, taking into account if the load process was successful or not.
38  */
39 
40 static const char ELF_MAGIC[] = {0x7f, 'E', 'L', 'F'};
41 
llext_loaded_sect_ptr(struct llext_loader * ldr,struct llext * ext,unsigned int sh_ndx)42 const void *llext_loaded_sect_ptr(struct llext_loader *ldr, struct llext *ext, unsigned int sh_ndx)
43 {
44 	enum llext_mem mem_idx = ldr->sect_map[sh_ndx].mem_idx;
45 
46 	if (mem_idx == LLEXT_MEM_COUNT) {
47 		return NULL;
48 	}
49 
50 	return (const uint8_t *)ext->mem[mem_idx] + ldr->sect_map[sh_ndx].offset;
51 }
52 
53 /*
54  * Load basic ELF file data
55  */
56 
llext_load_elf_data(struct llext_loader * ldr,struct llext * ext)57 static int llext_load_elf_data(struct llext_loader *ldr, struct llext *ext)
58 {
59 	int ret;
60 
61 	/* read ELF header */
62 
63 	ret = llext_seek(ldr, 0);
64 	if (ret != 0) {
65 		LOG_ERR("Failed to seek for ELF header");
66 		return ret;
67 	}
68 
69 	ret = llext_read(ldr, &ldr->hdr, sizeof(ldr->hdr));
70 	if (ret != 0) {
71 		LOG_ERR("Failed to read ELF header");
72 		return ret;
73 	}
74 
75 	/* check whether this is a valid ELF file */
76 	if (memcmp(ldr->hdr.e_ident, ELF_MAGIC, sizeof(ELF_MAGIC)) != 0) {
77 		LOG_HEXDUMP_ERR(ldr->hdr.e_ident, 16, "Invalid ELF, magic does not match");
78 		return -ENOEXEC;
79 	}
80 
81 	switch (ldr->hdr.e_type) {
82 	case ET_REL:
83 		LOG_DBG("Loading relocatable ELF");
84 		break;
85 
86 	case ET_DYN:
87 		LOG_DBG("Loading shared ELF");
88 		break;
89 
90 	default:
91 		LOG_ERR("Unsupported ELF file type %x", ldr->hdr.e_type);
92 		return -ENOEXEC;
93 	}
94 
95 	/*
96 	 * Read all ELF section headers and initialize maps.  Buffers allocated
97 	 * below are freed when leaving do_llext_load(), so don't count them in
98 	 * alloc_size.
99 	 */
100 
101 	if (ldr->hdr.e_shentsize != sizeof(elf_shdr_t)) {
102 		LOG_ERR("Invalid section header size %d", ldr->hdr.e_shentsize);
103 		return -ENOEXEC;
104 	}
105 
106 	ext->sect_cnt = ldr->hdr.e_shnum;
107 
108 	size_t sect_map_sz = ext->sect_cnt * sizeof(ldr->sect_map[0]);
109 
110 	ldr->sect_map = llext_alloc(sect_map_sz);
111 	if (!ldr->sect_map) {
112 		LOG_ERR("Failed to allocate section map, size %zu", sect_map_sz);
113 		return -ENOMEM;
114 	}
115 	for (int i = 0; i < ext->sect_cnt; i++) {
116 		ldr->sect_map[i].mem_idx = LLEXT_MEM_COUNT;
117 		ldr->sect_map[i].offset = 0;
118 	}
119 
120 	ext->sect_hdrs = (elf_shdr_t *)llext_peek(ldr, ldr->hdr.e_shoff);
121 	if (ext->sect_hdrs) {
122 		ext->sect_hdrs_on_heap = false;
123 	} else {
124 		size_t sect_hdrs_sz = ext->sect_cnt * sizeof(ext->sect_hdrs[0]);
125 
126 		ext->sect_hdrs_on_heap = true;
127 		ext->sect_hdrs = llext_alloc(sect_hdrs_sz);
128 		if (!ext->sect_hdrs) {
129 			LOG_ERR("Failed to allocate section headers, size %zu", sect_hdrs_sz);
130 			return -ENOMEM;
131 		}
132 
133 		ret = llext_seek(ldr, ldr->hdr.e_shoff);
134 		if (ret != 0) {
135 			LOG_ERR("Failed to seek for section headers");
136 			return ret;
137 		}
138 
139 		ret = llext_read(ldr, ext->sect_hdrs, sect_hdrs_sz);
140 		if (ret != 0) {
141 			LOG_ERR("Failed to read section headers");
142 			return ret;
143 		}
144 	}
145 
146 	return 0;
147 }
148 
149 /*
150  * Find all relevant string and symbol tables
151  */
llext_find_tables(struct llext_loader * ldr,struct llext * ext)152 static int llext_find_tables(struct llext_loader *ldr, struct llext *ext)
153 {
154 	int table_cnt, i;
155 
156 	memset(ldr->sects, 0, sizeof(ldr->sects));
157 
158 	/* Find symbol and string tables */
159 	for (i = 0, table_cnt = 0; i < ext->sect_cnt && table_cnt < 3; ++i) {
160 		elf_shdr_t *shdr = ext->sect_hdrs + i;
161 
162 		LOG_DBG("section %d at 0x%zx: name %d, type %d, flags 0x%zx, "
163 			"addr 0x%zx, size %zd, link %d, info %d",
164 			i,
165 			(size_t)shdr->sh_offset,
166 			shdr->sh_name,
167 			shdr->sh_type,
168 			(size_t)shdr->sh_flags,
169 			(size_t)shdr->sh_addr,
170 			(size_t)shdr->sh_size,
171 			shdr->sh_link,
172 			shdr->sh_info);
173 
174 		switch (shdr->sh_type) {
175 		case SHT_SYMTAB:
176 		case SHT_DYNSYM:
177 			LOG_DBG("symtab at %d", i);
178 			ldr->sects[LLEXT_MEM_SYMTAB] = *shdr;
179 			ldr->sect_map[i].mem_idx = LLEXT_MEM_SYMTAB;
180 			table_cnt++;
181 			break;
182 		case SHT_STRTAB:
183 			if (ldr->hdr.e_shstrndx == i) {
184 				LOG_DBG("shstrtab at %d", i);
185 				ldr->sects[LLEXT_MEM_SHSTRTAB] = *shdr;
186 				ldr->sect_map[i].mem_idx = LLEXT_MEM_SHSTRTAB;
187 			} else {
188 				LOG_DBG("strtab at %d", i);
189 				ldr->sects[LLEXT_MEM_STRTAB] = *shdr;
190 				ldr->sect_map[i].mem_idx = LLEXT_MEM_STRTAB;
191 			}
192 			table_cnt++;
193 			break;
194 		default:
195 			break;
196 		}
197 	}
198 
199 	if (!ldr->sects[LLEXT_MEM_SHSTRTAB].sh_type ||
200 	    !ldr->sects[LLEXT_MEM_STRTAB].sh_type ||
201 	    !ldr->sects[LLEXT_MEM_SYMTAB].sh_type) {
202 		LOG_ERR("Some sections are missing or present multiple times!");
203 		return -ENOEXEC;
204 	}
205 
206 	return 0;
207 }
208 
209 /*
210  * Maps the ELF sections into regions according to their usage flags,
211  * calculating ldr->sects and ldr->sect_map.
212  */
static int llext_map_sections(struct llext_loader *ldr, struct llext *ext,
			      const struct llext_load_param *ldr_parm)
{
	int i, j;
	const char *name;

	/* Pass 1: assign every allocatable section to a memory region and
	 * grow each region descriptor to cover all of its sections.
	 */
	for (i = 0; i < ext->sect_cnt; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;

		name = llext_string(ldr, ext, LLEXT_MEM_SHSTRTAB, shdr->sh_name);

		/* Tables already claimed by llext_find_tables() keep their mapping */
		if (ldr->sect_map[i].mem_idx != LLEXT_MEM_COUNT) {
			LOG_DBG("section %d name %s already mapped to region %d",
				i, name, ldr->sect_map[i].mem_idx);
			continue;
		}

		/* Identify the section type by its flags */
		enum llext_mem mem_idx;

		switch (shdr->sh_type) {
		case SHT_NOBITS:
			mem_idx = LLEXT_MEM_BSS;
			break;
		case SHT_PROGBITS:
			if (shdr->sh_flags & SHF_EXECINSTR) {
				mem_idx = LLEXT_MEM_TEXT;
			} else if (shdr->sh_flags & SHF_WRITE) {
				mem_idx = LLEXT_MEM_DATA;
			} else {
				mem_idx = LLEXT_MEM_RODATA;
			}
			break;
		case SHT_PREINIT_ARRAY:
			mem_idx = LLEXT_MEM_PREINIT;
			break;
		case SHT_INIT_ARRAY:
			mem_idx = LLEXT_MEM_INIT;
			break;
		case SHT_FINI_ARRAY:
			mem_idx = LLEXT_MEM_FINI;
			break;
		default:
			mem_idx = LLEXT_MEM_COUNT;
			break;
		}

		/* Special exception for .exported_sym */
		if (strcmp(name, ".exported_sym") == 0) {
			mem_idx = LLEXT_MEM_EXPORT;
		}

		/* Unrecognized, non-allocatable or empty sections are skipped */
		if (mem_idx == LLEXT_MEM_COUNT ||
		    !(shdr->sh_flags & SHF_ALLOC) ||
		    shdr->sh_size == 0) {
			LOG_DBG("section %d name %s skipped", i, name);
			continue;
		}

		/* Constructor/destructor arrays must hold whole pointers */
		switch (mem_idx) {
		case LLEXT_MEM_PREINIT:
		case LLEXT_MEM_INIT:
		case LLEXT_MEM_FINI:
			if (shdr->sh_entsize != sizeof(void *) ||
			    shdr->sh_size % shdr->sh_entsize != 0) {
				LOG_ERR("Invalid %s array in section %d", name, i);
				return -ENOEXEC;
			}
			/* fallthrough */
		default:
			break;
		}

		LOG_DBG("section %d name %s maps to region %d", i, name, mem_idx);

		ldr->sect_map[i].mem_idx = mem_idx;
		elf_shdr_t *region = ldr->sects + mem_idx;

		/*
		 * ELF objects can have sections for memory regions, detached from
		 * other sections of the same type. E.g. executable sections that will be
		 * placed in slower memory. Don't merge such sections into main regions
		 */
		if (ldr_parm->section_detached && ldr_parm->section_detached(shdr)) {
			continue;
		}

		if (region->sh_type == SHT_NULL) {
			/* First section of this type, copy all info to the
			 * region descriptor.
			 */
			memcpy(region, shdr, sizeof(*region));
		} else {
			/* Make sure this section is compatible with the region */
			if ((shdr->sh_flags & SHF_BASIC_TYPE_MASK) !=
			    (region->sh_flags & SHF_BASIC_TYPE_MASK)) {
				LOG_ERR("Unsupported section flags %#x / %#x for %s (region %d)",
					(uint32_t)shdr->sh_flags, (uint32_t)region->sh_flags,
					name, mem_idx);
				return -ENOEXEC;
			}

			/* Check if this region type is extendable */
			switch (mem_idx) {
			case LLEXT_MEM_BSS:
				/* SHT_NOBITS sections cannot be merged properly:
				 * as they use no space in the file, the logic
				 * below does not work; they must be treated as
				 * independent entities.
				 */
				LOG_ERR("Multiple SHT_NOBITS sections are not supported");
				return -ENOTSUP;
			case LLEXT_MEM_PREINIT:
			case LLEXT_MEM_INIT:
			case LLEXT_MEM_FINI:
				/* These regions are not extendable and must be
				 * referenced at most once in the ELF file.
				 */
				LOG_ERR("Region %d redefined", mem_idx);
				return -ENOEXEC;
			default:
				break;
			}

			if (ldr->hdr.e_type == ET_DYN) {
				/* In shared objects, sh_addr is the VMA.
				 * Before merging this section in the region,
				 * make sure the delta in VMAs matches that of
				 * file offsets.
				 */
				if (shdr->sh_addr - region->sh_addr !=
				    shdr->sh_offset - region->sh_offset) {
					LOG_ERR("Incompatible section addresses "
						"for %s (region %d)", name, mem_idx);
					return -ENOEXEC;
				}
			}

			/*
			 * Extend the current region to include the new section
			 * (overlaps are detected later)
			 */
			size_t address = MIN(region->sh_addr, shdr->sh_addr);
			size_t bot_ofs = MIN(region->sh_offset, shdr->sh_offset);
			size_t top_ofs = MAX(region->sh_offset + region->sh_size,
					     shdr->sh_offset + shdr->sh_size);

			region->sh_addr = address;
			region->sh_offset = bot_ofs;
			region->sh_size = top_ofs - bot_ofs;
		}
	}

	/*
	 * Test that no computed region overlaps. This can happen if sections of
	 * different llext_mem type are interleaved in the ELF file or in VMAs.
	 */
	for (i = 0; i < LLEXT_MEM_COUNT; i++) {
		for (j = i+1; j < LLEXT_MEM_COUNT; j++) {
			elf_shdr_t *x = ldr->sects + i;
			elf_shdr_t *y = ldr->sects + j;

			if (x->sh_type == SHT_NULL || x->sh_size == 0 ||
			    y->sh_type == SHT_NULL || y->sh_size == 0) {
				/* Skip empty regions */
				continue;
			}

			/*
			 * The export symbol table may be surrounded by
			 * other data sections. Ignore overlaps in that
			 * case.
			 *
			 * NOTE(review): this check appears to be subsumed by
			 * the unconditional LLEXT_MEM_EXPORT skip right below
			 * it — confirm whether it can be removed.
			 */
			if ((i == LLEXT_MEM_DATA || i == LLEXT_MEM_RODATA) &&
			    j == LLEXT_MEM_EXPORT) {
				continue;
			}

			/*
			 * Exported symbols region can also overlap
			 * with rodata.
			 */
			if (i == LLEXT_MEM_EXPORT || j == LLEXT_MEM_EXPORT) {
				continue;
			}

			if (ldr->hdr.e_type == ET_DYN) {
				/*
				 * Test all merged VMA ranges for overlaps
				 */
				if ((x->sh_addr <= y->sh_addr &&
				     x->sh_addr + x->sh_size > y->sh_addr) ||
				    (y->sh_addr <= x->sh_addr &&
				     y->sh_addr + y->sh_size > x->sh_addr)) {
					LOG_ERR("Region %d VMA range (0x%zx +%zd) "
						"overlaps with %d (0x%zx +%zd)",
						i, (size_t)x->sh_addr, (size_t)x->sh_size,
						j, (size_t)y->sh_addr, (size_t)y->sh_size);
					return -ENOEXEC;
				}
			}

			/*
			 * Test file offsets. BSS sections store no
			 * data in the file and must not be included
			 * in checks to avoid false positives.
			 */
			if (i == LLEXT_MEM_BSS || j == LLEXT_MEM_BSS) {
				continue;
			}

			if ((x->sh_offset <= y->sh_offset &&
			     x->sh_offset + x->sh_size > y->sh_offset) ||
			    (y->sh_offset <= x->sh_offset &&
			     y->sh_offset + y->sh_size > x->sh_offset)) {
				LOG_ERR("Region %d ELF file range (0x%zx +%zd) "
					"overlaps with %d (0x%zx +%zd)",
					i, (size_t)x->sh_offset, (size_t)x->sh_size,
					j, (size_t)y->sh_offset, (size_t)y->sh_size);
				return -ENOEXEC;
			}
		}
	}

	/*
	 * Calculate each ELF section's offset inside its memory region. This
	 * is done as a separate pass so the final regions are already defined.
	 */
	for (i = 0; i < ext->sect_cnt; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;
		enum llext_mem mem_idx = ldr->sect_map[i].mem_idx;

		if (mem_idx != LLEXT_MEM_COUNT) {
			ldr->sect_map[i].offset = shdr->sh_offset - ldr->sects[mem_idx].sh_offset;
		}
	}

	return 0;
}
451 
llext_count_export_syms(struct llext_loader * ldr,struct llext * ext)452 static int llext_count_export_syms(struct llext_loader *ldr, struct llext *ext)
453 {
454 	size_t ent_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_entsize;
455 	size_t syms_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_size;
456 	int sym_cnt = syms_size / sizeof(elf_sym_t);
457 	const char *name;
458 	elf_sym_t sym;
459 	int i, ret;
460 	size_t pos;
461 
462 	LOG_DBG("symbol count %u", sym_cnt);
463 
464 	ext->sym_tab.sym_cnt = 0;
465 	for (i = 0, pos = ldr->sects[LLEXT_MEM_SYMTAB].sh_offset;
466 	     i < sym_cnt;
467 	     i++, pos += ent_size) {
468 		if (!i) {
469 			/* A dummy entry */
470 			continue;
471 		}
472 
473 		ret = llext_seek(ldr, pos);
474 		if (ret != 0) {
475 			return ret;
476 		}
477 
478 		ret = llext_read(ldr, &sym, ent_size);
479 		if (ret != 0) {
480 			return ret;
481 		}
482 
483 		uint32_t stt = ELF_ST_TYPE(sym.st_info);
484 		uint32_t stb = ELF_ST_BIND(sym.st_info);
485 		uint32_t sect = sym.st_shndx;
486 
487 		name = llext_string(ldr, ext, LLEXT_MEM_STRTAB, sym.st_name);
488 
489 		if ((stt == STT_FUNC || stt == STT_OBJECT) && stb == STB_GLOBAL) {
490 			LOG_DBG("function symbol %d, name %s, type tag %d, bind %d, sect %d",
491 				i, name, stt, stb, sect);
492 			ext->sym_tab.sym_cnt++;
493 		} else {
494 			LOG_DBG("unhandled symbol %d, name %s, type tag %d, bind %d, sect %d",
495 				i, name, stt, stb, sect);
496 		}
497 	}
498 
499 	return 0;
500 }
501 
llext_allocate_symtab(struct llext_loader * ldr,struct llext * ext)502 static int llext_allocate_symtab(struct llext_loader *ldr, struct llext *ext)
503 {
504 	struct llext_symtable *sym_tab = &ext->sym_tab;
505 	size_t syms_size = sym_tab->sym_cnt * sizeof(struct llext_symbol);
506 
507 	sym_tab->syms = llext_alloc(syms_size);
508 	if (!sym_tab->syms) {
509 		return -ENOMEM;
510 	}
511 	memset(sym_tab->syms, 0, syms_size);
512 	ext->alloc_size += syms_size;
513 
514 	return 0;
515 }
516 
llext_export_symbols(struct llext_loader * ldr,struct llext * ext)517 static int llext_export_symbols(struct llext_loader *ldr, struct llext *ext)
518 {
519 	elf_shdr_t *shdr = ldr->sects + LLEXT_MEM_EXPORT;
520 	struct llext_symbol *sym;
521 	unsigned int i;
522 
523 	if (shdr->sh_size < sizeof(struct llext_symbol)) {
524 		/* Not found, no symbols exported */
525 		return 0;
526 	}
527 
528 	struct llext_symtable *exp_tab = &ext->exp_tab;
529 
530 	exp_tab->sym_cnt = shdr->sh_size / sizeof(struct llext_symbol);
531 	exp_tab->syms = llext_alloc(exp_tab->sym_cnt * sizeof(struct llext_symbol));
532 	if (!exp_tab->syms) {
533 		return -ENOMEM;
534 	}
535 
536 	for (i = 0, sym = ext->mem[LLEXT_MEM_EXPORT];
537 	     i < exp_tab->sym_cnt;
538 	     i++, sym++) {
539 		exp_tab->syms[i].name = sym->name;
540 		exp_tab->syms[i].addr = sym->addr;
541 		LOG_DBG("sym %p name %s in %p", sym->addr, sym->name, exp_tab->syms + i);
542 	}
543 
544 	return 0;
545 }
546 
llext_copy_symbols(struct llext_loader * ldr,struct llext * ext,const struct llext_load_param * ldr_parm)547 static int llext_copy_symbols(struct llext_loader *ldr, struct llext *ext,
548 			      const struct llext_load_param *ldr_parm)
549 {
550 	size_t ent_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_entsize;
551 	size_t syms_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_size;
552 	int sym_cnt = syms_size / sizeof(elf_sym_t);
553 	struct llext_symtable *sym_tab = &ext->sym_tab;
554 	elf_sym_t sym;
555 	int i, j, ret;
556 	size_t pos;
557 
558 	for (i = 0, pos = ldr->sects[LLEXT_MEM_SYMTAB].sh_offset, j = 0;
559 	     i < sym_cnt;
560 	     i++, pos += ent_size) {
561 		if (!i) {
562 			/* A dummy entry */
563 			continue;
564 		}
565 
566 		ret = llext_seek(ldr, pos);
567 		if (ret != 0) {
568 			return ret;
569 		}
570 
571 		ret = llext_read(ldr, &sym, ent_size);
572 		if (ret != 0) {
573 			return ret;
574 		}
575 
576 		uint32_t stt = ELF_ST_TYPE(sym.st_info);
577 		uint32_t stb = ELF_ST_BIND(sym.st_info);
578 		unsigned int shndx = sym.st_shndx;
579 
580 		if ((stt == STT_FUNC || stt == STT_OBJECT) &&
581 		    stb == STB_GLOBAL && shndx != SHN_UNDEF) {
582 			const char *name = llext_string(ldr, ext, LLEXT_MEM_STRTAB, sym.st_name);
583 
584 			__ASSERT(j <= sym_tab->sym_cnt, "Miscalculated symbol number %u\n", j);
585 
586 			sym_tab->syms[j].name = name;
587 
588 			elf_shdr_t *shdr = ext->sect_hdrs + shndx;
589 			uintptr_t section_addr = shdr->sh_addr;
590 
591 			if (ldr_parm->pre_located &&
592 			    (!ldr_parm->section_detached || !ldr_parm->section_detached(shdr))) {
593 				sym_tab->syms[j].addr = (uint8_t *)sym.st_value +
594 					(ldr->hdr.e_type == ET_REL ? section_addr : 0);
595 			} else {
596 				const void *base;
597 
598 				base = llext_loaded_sect_ptr(ldr, ext, shndx);
599 				if (!base) {
600 					/* If the section is not mapped, try to peek.
601 					 * Be noisy about it, since this is addressing
602 					 * data that was missed by llext_map_sections.
603 					 */
604 					base = llext_peek(ldr, shdr->sh_offset);
605 					if (base) {
606 						LOG_DBG("section %d peeked at %p", shndx, base);
607 					} else {
608 						LOG_ERR("No data for section %d", shndx);
609 						return -ENOTSUP;
610 					}
611 				}
612 
613 				sym_tab->syms[j].addr = (uint8_t *)base + sym.st_value -
614 					(ldr->hdr.e_type == ET_REL ? 0 : section_addr);
615 			}
616 
617 			LOG_DBG("function symbol %d name %s addr %p",
618 				j, name, sym_tab->syms[j].addr);
619 			j++;
620 		}
621 	}
622 
623 	return 0;
624 }
625 
626 /*
627  * Load a valid ELF as an extension
628  */
do_llext_load(struct llext_loader * ldr,struct llext * ext,const struct llext_load_param * ldr_parm)629 int do_llext_load(struct llext_loader *ldr, struct llext *ext,
630 		  const struct llext_load_param *ldr_parm)
631 {
632 	const struct llext_load_param default_ldr_parm = LLEXT_LOAD_PARAM_DEFAULT;
633 	int ret;
634 
635 	if (!ldr_parm) {
636 		ldr_parm = &default_ldr_parm;
637 	}
638 
639 	/* Zero all memory that is affected by the loading process
640 	 * (see the NOTICE at the top of this file).
641 	 */
642 	memset(ext, 0, sizeof(*ext));
643 	ldr->sect_map = NULL;
644 
645 	LOG_DBG("Loading ELF data...");
646 	ret = llext_prepare(ldr);
647 	if (ret != 0) {
648 		LOG_ERR("Failed to prepare the loader, ret %d", ret);
649 		goto out;
650 	}
651 
652 	ret = llext_load_elf_data(ldr, ext);
653 	if (ret != 0) {
654 		LOG_ERR("Failed to load basic ELF data, ret %d", ret);
655 		goto out;
656 	}
657 
658 #ifdef CONFIG_USERSPACE
659 	ret = k_mem_domain_init(&ext->mem_domain, 0, NULL);
660 	if (ret != 0) {
661 		LOG_ERR("Failed to initialize extenion memory domain %d", ret);
662 		goto out;
663 	}
664 #endif
665 
666 	LOG_DBG("Finding ELF tables...");
667 	ret = llext_find_tables(ldr, ext);
668 	if (ret != 0) {
669 		LOG_ERR("Failed to find important ELF tables, ret %d", ret);
670 		goto out;
671 	}
672 
673 	LOG_DBG("Allocate and copy strings...");
674 	ret = llext_copy_strings(ldr, ext);
675 	if (ret != 0) {
676 		LOG_ERR("Failed to copy ELF string sections, ret %d", ret);
677 		goto out;
678 	}
679 
680 	LOG_DBG("Mapping ELF sections...");
681 	ret = llext_map_sections(ldr, ext, ldr_parm);
682 	if (ret != 0) {
683 		LOG_ERR("Failed to map ELF sections, ret %d", ret);
684 		goto out;
685 	}
686 
687 	LOG_DBG("Allocate and copy regions...");
688 	ret = llext_copy_regions(ldr, ext, ldr_parm);
689 	if (ret != 0) {
690 		LOG_ERR("Failed to copy regions, ret %d", ret);
691 		goto out;
692 	}
693 
694 	LOG_DBG("Counting exported symbols...");
695 	ret = llext_count_export_syms(ldr, ext);
696 	if (ret != 0) {
697 		LOG_ERR("Failed to count exported ELF symbols, ret %d", ret);
698 		goto out;
699 	}
700 
701 	LOG_DBG("Allocating memory for symbol table...");
702 	ret = llext_allocate_symtab(ldr, ext);
703 	if (ret != 0) {
704 		LOG_ERR("Failed to allocate extension symbol table, ret %d", ret);
705 		goto out;
706 	}
707 
708 	LOG_DBG("Copying symbols...");
709 	ret = llext_copy_symbols(ldr, ext, ldr_parm);
710 	if (ret != 0) {
711 		LOG_ERR("Failed to copy symbols, ret %d", ret);
712 		goto out;
713 	}
714 
715 	if (ldr_parm->relocate_local) {
716 		LOG_DBG("Linking ELF...");
717 		ret = llext_link(ldr, ext, ldr_parm);
718 		if (ret != 0) {
719 			LOG_ERR("Failed to link, ret %d", ret);
720 			goto out;
721 		}
722 	}
723 
724 	ret = llext_export_symbols(ldr, ext);
725 	if (ret != 0) {
726 		LOG_ERR("Failed to export, ret %d", ret);
727 		goto out;
728 	}
729 
730 	llext_adjust_mmu_permissions(ext);
731 
732 out:
733 	/*
734 	 * Free resources only used during loading. Note that this exploits
735 	 * the fact that freeing a NULL pointer has no effect.
736 	 */
737 
738 	llext_free(ldr->sect_map);
739 	ldr->sect_map = NULL;
740 
741 	/* Until proper inter-llext linking is implemented, the symbol table is
742 	 * not useful outside of the loading process; keep it only if debugging
743 	 * is enabled and no error is detected.
744 	 */
745 	if (!(IS_ENABLED(CONFIG_LLEXT_LOG_LEVEL_DBG) && ret == 0)) {
746 		llext_free(ext->sym_tab.syms);
747 		ext->sym_tab.sym_cnt = 0;
748 		ext->sym_tab.syms = NULL;
749 	}
750 
751 	if (ret != 0) {
752 		LOG_DBG("Failed to load extension: %d", ret);
753 
754 		/* Since the loading process failed, free the resources that
755 		 * were allocated for the lifetime of the extension as well,
756 		 * such as regions and exported symbols.
757 		 */
758 		llext_free_regions(ext);
759 		llext_free(ext->exp_tab.syms);
760 		ext->exp_tab.sym_cnt = 0;
761 		ext->exp_tab.syms = NULL;
762 	} else {
763 		LOG_DBG("loaded module, .text at %p, .rodata at %p", ext->mem[LLEXT_MEM_TEXT],
764 			ext->mem[LLEXT_MEM_RODATA]);
765 	}
766 
767 	llext_finalize(ldr);
768 
769 	return ret;
770 }
771