/*
 * Copyright (c) 2023 Intel Corporation
 * Copyright (c) 2024 Arduino SA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/util.h>
#include <zephyr/llext/elf.h>
#include <zephyr/llext/loader.h>
#include <zephyr/llext/llext.h>
#include <zephyr/llext/llext_internal.h>
#include <zephyr/kernel.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);

#include <string.h>

#include "llext_priv.h"

/*
 * NOTICE: Functions in this file do not clean up allocations in their error
 * paths; instead, this is performed once and for all when leaving the parent
 * `do_llext_load()` function. This approach consolidates memory management
 * in a single place, simplifying error handling and reducing the risk of
 * memory leaks.
 *
 * The following rationale applies:
 *
 * - The input `struct llext` and fields in `struct loader` are zero-filled
 *   at the beginning of the do_llext_load function, so that every pointer is
 *   set to NULL and every bool is false.
 * - If some function called by do_llext_load allocates memory, it does so by
 *   immediately writing the pointer in the `ext` and `ldr` structures.
 * - do_llext_load() will clean up the memory allocated by the functions it
 *   calls, taking into account if the load process was successful or not.
 */

static const char ELF_MAGIC[] = {0x7f, 'E', 'L', 'F'};

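/*
 * Return the run-time address of ELF section sh_ndx within the region it was
 * assigned to, or NULL if the section is not mapped to any region.
 */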
const void *llext_loaded_sect_ptr(struct llext_loader *ldr, struct llext *ext, unsigned int sh_ndx)
{
	enum llext_mem mem_idx = ldr->sect_map[sh_ndx].mem_idx;

	if (mem_idx == LLEXT_MEM_COUNT) {
		return NULL;
	}

	return (const uint8_t *)ext->mem[mem_idx] + ldr->sect_map[sh_ndx].offset;
}

/*
 * Load basic ELF file data
 */

static int llext_load_elf_data(struct llext_loader *ldr, struct llext *ext)
{
	int ret;

	/* read ELF header */

	ret = llext_seek(ldr, 0);
	if (ret != 0) {
		LOG_ERR("Failed to seek for ELF header");
		return ret;
	}

	ret = llext_read(ldr, &ldr->hdr, sizeof(ldr->hdr));
	if (ret != 0) {
		LOG_ERR("Failed to read ELF header");
		return ret;
	}

	/* check whether this is a valid ELF file */
	if (memcmp(ldr->hdr.e_ident, ELF_MAGIC, sizeof(ELF_MAGIC)) != 0) {
		LOG_HEXDUMP_ERR(ldr->hdr.e_ident, 16, "Invalid ELF, magic does not match");
		return -ENOEXEC;
	}

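	/*
	 * Accept relocatable objects (ET_REL, typically produced by a compile
	 * or a partial link) and position-independent shared ELFs (ET_DYN);
	 * executables and any other ELF types are rejected.
	 */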
	switch (ldr->hdr.e_type) {
	case ET_REL:
		LOG_DBG("Loading relocatable ELF");
		break;

	case ET_DYN:
		LOG_DBG("Loading shared ELF");
		break;

	default:
		LOG_ERR("Unsupported ELF file type %x", ldr->hdr.e_type);
		return -ENOEXEC;
	}

	/*
	 * Read all ELF section headers and initialize maps. The section map
	 * allocated below is counted in alloc_size; that accounting is
	 * reversed when the map is freed in llext_free_inspection_data().
	 */

	if (ldr->hdr.e_shentsize != sizeof(elf_shdr_t)) {
		LOG_ERR("Invalid section header size %d", ldr->hdr.e_shentsize);
		return -ENOEXEC;
	}

	ext->sect_cnt = ldr->hdr.e_shnum;

	size_t sect_map_sz = ext->sect_cnt * sizeof(ldr->sect_map[0]);

	ldr->sect_map = llext_alloc(sect_map_sz);
	if (!ldr->sect_map) {
		LOG_ERR("Failed to allocate section map, size %zu", sect_map_sz);
		return -ENOMEM;
	}
	ext->alloc_size += sect_map_sz;
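	/* LLEXT_MEM_COUNT doubles as an "unmapped" marker in sect_map */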
	for (int i = 0; i < ext->sect_cnt; i++) {
		ldr->sect_map[i].mem_idx = LLEXT_MEM_COUNT;
		ldr->sect_map[i].offset = 0;
	}

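	/*
	 * If the loader can expose the ELF image directly in addressable
	 * memory, llext_peek() returns a pointer inside it and the section
	 * headers are used in place; otherwise they are copied to the heap.
	 */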
	ext->sect_hdrs = (elf_shdr_t *)llext_peek(ldr, ldr->hdr.e_shoff);
	if (ext->sect_hdrs) {
		ext->sect_hdrs_on_heap = false;
	} else {
		size_t sect_hdrs_sz = ext->sect_cnt * sizeof(ext->sect_hdrs[0]);

		ext->sect_hdrs_on_heap = true;
		ext->sect_hdrs = llext_alloc(sect_hdrs_sz);
		if (!ext->sect_hdrs) {
			LOG_ERR("Failed to allocate section headers, size %zu", sect_hdrs_sz);
			return -ENOMEM;
		}

		ret = llext_seek(ldr, ldr->hdr.e_shoff);
		if (ret != 0) {
			LOG_ERR("Failed to seek for section headers");
			return ret;
		}

		ret = llext_read(ldr, ext->sect_hdrs, sect_hdrs_sz);
		if (ret != 0) {
			LOG_ERR("Failed to read section headers");
			return ret;
		}
	}

	return 0;
}

/*
 * Find all relevant string and symbol tables
 */
static int llext_find_tables(struct llext_loader *ldr, struct llext *ext)
{
	int table_cnt, i;
	int shstrtab_ndx = ldr->hdr.e_shstrndx;
	int strtab_ndx = -1;

	memset(ldr->sects, 0, sizeof(ldr->sects));

	/* Find symbol and string tables */
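	/*
	 * Per the ELF spec, a symbol table's sh_link field holds the section
	 * index of its associated string table, while e_shstrndx names the
	 * section header string table; strtab_ndx is therefore only known
	 * once the symbol table has been found.
	 */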
	for (i = 0, table_cnt = 0; i < ext->sect_cnt && table_cnt < 3; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;

		LOG_DBG("section %d at %#zx: name %d, type %d, flags %#zx, "
			"addr %#zx, align %#zx, size %zd, link %d, info %d",
			i,
			(size_t)shdr->sh_offset,
			shdr->sh_name,
			shdr->sh_type,
			(size_t)shdr->sh_flags,
			(size_t)shdr->sh_addr,
			(size_t)shdr->sh_addralign,
			(size_t)shdr->sh_size,
			shdr->sh_link,
			shdr->sh_info);

		if (shdr->sh_type == SHT_SYMTAB && ldr->hdr.e_type == ET_REL) {
			LOG_DBG("symtab at %d", i);
			ldr->sects[LLEXT_MEM_SYMTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_SYMTAB;
			strtab_ndx = shdr->sh_link;
			table_cnt++;
		} else if (shdr->sh_type == SHT_DYNSYM && ldr->hdr.e_type == ET_DYN) {
			LOG_DBG("dynsym at %d", i);
			ldr->sects[LLEXT_MEM_SYMTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_SYMTAB;
			strtab_ndx = shdr->sh_link;
			table_cnt++;
		} else if (shdr->sh_type == SHT_STRTAB && i == shstrtab_ndx) {
			LOG_DBG("shstrtab at %d", i);
			ldr->sects[LLEXT_MEM_SHSTRTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_SHSTRTAB;
			table_cnt++;
		} else if (shdr->sh_type == SHT_STRTAB && i == strtab_ndx) {
			LOG_DBG("strtab at %d", i);
			ldr->sects[LLEXT_MEM_STRTAB] = *shdr;
			ldr->sect_map[i].mem_idx = LLEXT_MEM_STRTAB;
			table_cnt++;
		}
	}

	if (!ldr->sects[LLEXT_MEM_SHSTRTAB].sh_type ||
	    !ldr->sects[LLEXT_MEM_STRTAB].sh_type ||
	    !ldr->sects[LLEXT_MEM_SYMTAB].sh_type) {
		LOG_ERR("Some sections are missing or present multiple times!");
		return -ENOEXEC;
	}

	return 0;
}

/* First (bottom) and last (top) bytes of a region, inclusive, for a specific field. */
#define REGION_BOT(reg, field) (size_t)(reg->field + reg->sh_info)
#define REGION_TOP(reg, field) (size_t)(reg->field + reg->sh_size - 1)

/* Check if two regions x and y have any overlap on a given field. Any shared value counts. */
#define REGIONS_OVERLAP_ON(x, y, f) \
	((REGION_BOT(x, f) <= REGION_BOT(y, f) && REGION_TOP(x, f) >= REGION_BOT(y, f)) || \
	 (REGION_BOT(y, f) <= REGION_BOT(x, f) && REGION_TOP(y, f) >= REGION_BOT(x, f)))
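
/*
 * For SHF_ALLOC regions, sh_info holds the pre-padding length computed in
 * llext_map_sections() below, so REGION_BOT() skips the padding and the
 * overlap checks only consider the real payload of each region.
 */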

/*
 * Loops through all defined ELF sections and collapses those with similar
 * usage flags into LLEXT "regions", taking alignment constraints into account.
 * Checks the generated regions for overlaps and calculates the offset of each
 * section within its region.
 *
 * This information is stored in the ldr->sects and ldr->sect_map arrays.
 */
static int llext_map_sections(struct llext_loader *ldr, struct llext *ext,
			      const struct llext_load_param *ldr_parm)
{
	int i, j;
	const char *name;

	for (i = 0; i < ext->sect_cnt; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;

		name = llext_section_name(ldr, ext, shdr);

		if (ldr->sect_map[i].mem_idx != LLEXT_MEM_COUNT) {
			LOG_DBG("section %d name %s already mapped to region %d",
				i, name, ldr->sect_map[i].mem_idx);
			continue;
		}

		/* Identify the section type by its flags */
		enum llext_mem mem_idx;

		switch (shdr->sh_type) {
		case SHT_NOBITS:
			mem_idx = LLEXT_MEM_BSS;
			break;
		case SHT_PROGBITS:
			if (shdr->sh_flags & SHF_EXECINSTR) {
				mem_idx = LLEXT_MEM_TEXT;
			} else if (shdr->sh_flags & SHF_WRITE) {
				mem_idx = LLEXT_MEM_DATA;
			} else {
				mem_idx = LLEXT_MEM_RODATA;
			}
			break;
		case SHT_PREINIT_ARRAY:
			mem_idx = LLEXT_MEM_PREINIT;
			break;
		case SHT_INIT_ARRAY:
			mem_idx = LLEXT_MEM_INIT;
			break;
		case SHT_FINI_ARRAY:
			mem_idx = LLEXT_MEM_FINI;
			break;
		default:
			mem_idx = LLEXT_MEM_COUNT;
			break;
		}

		/* Special exception for .exported_sym */
		if (strcmp(name, ".exported_sym") == 0) {
			mem_idx = LLEXT_MEM_EXPORT;
		}

		if (mem_idx == LLEXT_MEM_COUNT ||
		    !(shdr->sh_flags & SHF_ALLOC) ||
		    shdr->sh_size == 0) {
			LOG_DBG("section %d name %s skipped", i, name);
			continue;
		}

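		/*
		 * The {pre,}init and fini arrays hold function pointers, so
		 * each entry must be exactly pointer-sized and the section
		 * size a whole multiple of the entry size.
		 */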
		switch (mem_idx) {
		case LLEXT_MEM_PREINIT:
		case LLEXT_MEM_INIT:
		case LLEXT_MEM_FINI:
			if (shdr->sh_entsize != sizeof(void *) ||
			    shdr->sh_size % shdr->sh_entsize != 0) {
				LOG_ERR("Invalid %s array in section %d", name, i);
				return -ENOEXEC;
			}
		default:
			break;
		}

		LOG_DBG("section %d name %s maps to region %d", i, name, mem_idx);

		ldr->sect_map[i].mem_idx = mem_idx;
		elf_shdr_t *region = ldr->sects + mem_idx;

		/*
		 * Some applications may require specific ELF sections to not
		 * be included in their default memory regions; e.g. the ELF
		 * file may contain executable sections that are designed to be
		 * placed in slower memory. Don't merge such sections into main
		 * regions.
		 */
		if (ldr_parm->section_detached && ldr_parm->section_detached(shdr)) {
			continue;
		}

		if (region->sh_type == SHT_NULL) {
			/* First section of this type, copy all info to the
			 * region descriptor.
			 */
			memcpy(region, shdr, sizeof(*region));
			continue;
		}

		/* Make sure this section is compatible with the existing region */
		if ((shdr->sh_flags & SHF_BASIC_TYPE_MASK) !=
		    (region->sh_flags & SHF_BASIC_TYPE_MASK)) {
			LOG_ERR("Unsupported section flags %#x / %#x for %s (region %d)",
				(uint32_t)shdr->sh_flags, (uint32_t)region->sh_flags,
				name, mem_idx);
			return -ENOEXEC;
		}

		/* Check if this region type is extendable */
		switch (mem_idx) {
		case LLEXT_MEM_BSS:
			/* SHT_NOBITS sections cannot be merged properly:
			 * as they use no space in the file, the logic
			 * below does not work; they must be treated as
			 * independent entities.
			 */
			LOG_ERR("Multiple SHT_NOBITS sections are not supported");
			return -ENOTSUP;
		case LLEXT_MEM_PREINIT:
		case LLEXT_MEM_INIT:
		case LLEXT_MEM_FINI:
			/* These regions are not extendable and must be
			 * referenced at most once in the ELF file.
			 */
			LOG_ERR("Region %d redefined", mem_idx);
			return -ENOEXEC;
		default:
			break;
		}

		if (ldr->hdr.e_type == ET_DYN) {
			/* In shared objects, sh_addr is the VMA.
			 * Before merging this section in the region,
			 * make sure the delta in VMAs matches that of
			 * file offsets.
			 */
			if (shdr->sh_addr - region->sh_addr !=
			    shdr->sh_offset - region->sh_offset) {
				LOG_ERR("Incompatible section addresses for %s (region %d)",
					name, mem_idx);
				return -ENOEXEC;
			}
		}

		/*
		 * Extend the current region to include the new section
		 * (overlaps are detected later)
		 */
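		/*
		 * Example: merging a section at file offset 0x100, size 0x80
		 * into a region at offset 0x200, size 0x40 yields a region
		 * covering 0x100-0x23f, i.e. offset 0x100 and size 0x140;
		 * any gap between the two stays inside the region.
		 */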
		size_t address = MIN(region->sh_addr, shdr->sh_addr);
		size_t bot_ofs = MIN(region->sh_offset, shdr->sh_offset);
		size_t top_ofs = MAX(region->sh_offset + region->sh_size,
				     shdr->sh_offset + shdr->sh_size);
		size_t addralign = MAX(region->sh_addralign, shdr->sh_addralign);

		region->sh_addr = address;
		region->sh_offset = bot_ofs;
		region->sh_size = top_ofs - bot_ofs;
		region->sh_addralign = addralign;
	}

	/*
	 * Make sure each of the mapped sections satisfies its alignment
	 * requirement when placed in the region.
	 *
	 * The ELF standard already guarantees that each section's offset in
	 * the file satisfies its own alignment, and since only powers of 2 can
	 * be specified, a solution satisfying the largest alignment will also
	 * work for any smaller one. Aligning the ELF region to the largest
	 * requirement among the contained sections will then guarantee that
	 * all are properly aligned.
	 *
	 * However, adjusting the region's start address will make the region
	 * appear larger than it actually is, and might even make it overlap
	 * with others. To allow for further precise adjustments, the length of
	 * the calculated pre-padding area is stored in the 'sh_info' field of
	 * the region descriptor, which is not used on any SHF_ALLOC section.
	 */
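	/*
	 * Example: a region at file offset 0x104 with sh_addralign 0x10 gets
	 * prepad = 0x104 & 0xf = 4: its offset moves back to 0x100, its size
	 * grows by 4 bytes and sh_info records the 4-byte gap.
	 */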
	for (i = 0; i < LLEXT_MEM_COUNT; i++) {
		elf_shdr_t *region = ldr->sects + i;

		if (region->sh_type == SHT_NULL || region->sh_size == 0) {
			/* Skip empty regions */
			continue;
		}

		size_t prepad = region->sh_offset & (region->sh_addralign - 1);

		if (ldr->hdr.e_type == ET_DYN) {
			/* Only shared files populate sh_addr fields */
			if (prepad > region->sh_addr) {
				LOG_ERR("Bad section alignment in region %d", i);
				return -ENOEXEC;
			}

			region->sh_addr -= prepad;
		}
		region->sh_offset -= prepad;
		region->sh_size += prepad;
		region->sh_info = prepad;
	}

	/*
	 * Test that no computed region overlaps. This can happen if sections of
	 * different llext_mem type are interleaved in the ELF file or in VMAs.
	 */
	for (i = 0; i < LLEXT_MEM_COUNT; i++) {
		for (j = i + 1; j < LLEXT_MEM_COUNT; j++) {
			elf_shdr_t *x = ldr->sects + i;
			elf_shdr_t *y = ldr->sects + j;

			if (x->sh_type == SHT_NULL || x->sh_size == 0 ||
			    y->sh_type == SHT_NULL || y->sh_size == 0) {
				/* Skip empty regions */
				continue;
			}

			/*
			 * The export symbol table may be surrounded by
			 * other data sections. Ignore overlaps in that
			 * case.
			 */
			if ((i == LLEXT_MEM_DATA || i == LLEXT_MEM_RODATA) &&
			    j == LLEXT_MEM_EXPORT) {
				continue;
			}

			/*
			 * Exported symbols region can also overlap
			 * with rodata.
			 */
			if (i == LLEXT_MEM_EXPORT || j == LLEXT_MEM_EXPORT) {
				continue;
			}

			if ((ldr->hdr.e_type == ET_DYN) &&
			    (x->sh_flags & SHF_ALLOC) && (y->sh_flags & SHF_ALLOC)) {
				/*
				 * Test regions that have VMA ranges for overlaps
				 */
				if (REGIONS_OVERLAP_ON(x, y, sh_addr)) {
					LOG_ERR("Region %d VMA range (%#zx-%#zx) "
						"overlaps with %d (%#zx-%#zx)",
						i, REGION_BOT(x, sh_addr), REGION_TOP(x, sh_addr),
						j, REGION_BOT(y, sh_addr), REGION_TOP(y, sh_addr));
					return -ENOEXEC;
				}
			}

			/*
			 * Test file offsets. BSS sections store no
			 * data in the file and must not be included
			 * in checks to avoid false positives.
			 */
			if (i == LLEXT_MEM_BSS || j == LLEXT_MEM_BSS) {
				continue;
			}

			if (REGIONS_OVERLAP_ON(x, y, sh_offset)) {
				LOG_ERR("Region %d ELF file range (%#zx-%#zx) "
					"overlaps with %d (%#zx-%#zx)",
					i, REGION_BOT(x, sh_offset), REGION_TOP(x, sh_offset),
					j, REGION_BOT(y, sh_offset), REGION_TOP(y, sh_offset));
				return -ENOEXEC;
			}
		}
	}

	/*
	 * Calculate each ELF section's offset inside its memory region. This
	 * is done as a separate pass so the final regions are already defined.
	 */
	for (i = 0; i < ext->sect_cnt; ++i) {
		elf_shdr_t *shdr = ext->sect_hdrs + i;
		enum llext_mem mem_idx = ldr->sect_map[i].mem_idx;

		if (mem_idx != LLEXT_MEM_COUNT) {
			ldr->sect_map[i].offset = shdr->sh_offset - ldr->sects[mem_idx].sh_offset;
		}
	}

	return 0;
}

static int llext_count_export_syms(struct llext_loader *ldr, struct llext *ext)
{
	size_t ent_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_entsize;
	size_t syms_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_size;
	int sym_cnt = syms_size / sizeof(elf_sym_t);
	const char *name;
	elf_sym_t sym;
	int i, ret;
	size_t pos;

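	/*
	 * Note: this function and llext_copy_symbols() assume sh_entsize
	 * matches sizeof(elf_sym_t); on a malformed ELF, sym_cnt and the
	 * per-entry reads below would disagree.
	 */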
	LOG_DBG("symbol count %d", sym_cnt);

	ext->sym_tab.sym_cnt = 0;
	for (i = 0, pos = ldr->sects[LLEXT_MEM_SYMTAB].sh_offset;
	     i < sym_cnt;
	     i++, pos += ent_size) {
		if (!i) {
			/* A dummy entry */
			continue;
		}

		ret = llext_seek(ldr, pos);
		if (ret != 0) {
			return ret;
		}

		ret = llext_read(ldr, &sym, ent_size);
		if (ret != 0) {
			return ret;
		}

		uint32_t stt = ELF_ST_TYPE(sym.st_info);
		uint32_t stb = ELF_ST_BIND(sym.st_info);
		uint32_t sect = sym.st_shndx;

		name = llext_symbol_name(ldr, ext, &sym);

		if ((stt == STT_FUNC || stt == STT_OBJECT) && stb == STB_GLOBAL) {
			LOG_DBG("global symbol %d, name %s, type tag %d, bind %d, sect %d",
				i, name, stt, stb, sect);
			ext->sym_tab.sym_cnt++;
		} else {
			LOG_DBG("unhandled symbol %d, name %s, type tag %d, bind %d, sect %d",
				i, name, stt, stb, sect);
		}
	}

	return 0;
}

static int llext_allocate_symtab(struct llext_loader *ldr, struct llext *ext)
{
	struct llext_symtable *sym_tab = &ext->sym_tab;
	size_t syms_size = sym_tab->sym_cnt * sizeof(struct llext_symbol);

	sym_tab->syms = llext_alloc(syms_size);
	if (!sym_tab->syms) {
		return -ENOMEM;
	}
	memset(sym_tab->syms, 0, syms_size);
	ext->alloc_size += syms_size;

	return 0;
}

static int llext_export_symbols(struct llext_loader *ldr, struct llext *ext,
				const struct llext_load_param *ldr_parm)
{
	struct llext_symtable *exp_tab = &ext->exp_tab;
	struct llext_symbol *sym;
	unsigned int i;

	if (IS_ENABLED(CONFIG_LLEXT_IMPORT_ALL_GLOBALS)) {
		/* Use already discovered global symbols */
		exp_tab->sym_cnt = ext->sym_tab.sym_cnt;
		sym = ext->sym_tab.syms;
	} else {
		/* Only use symbols in the .exported_sym section */
		exp_tab->sym_cnt = ldr->sects[LLEXT_MEM_EXPORT].sh_size
				   / sizeof(struct llext_symbol);
		sym = ext->mem[LLEXT_MEM_EXPORT];
	}

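	/*
	 * The .exported_sym section is an array of struct llext_symbol
	 * entries, one per symbol the extension explicitly exports (see the
	 * LL_EXTENSION_SYMBOL() macro in zephyr/llext/symbol.h).
	 */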
	if (!exp_tab->sym_cnt) {
		/* No symbols exported */
		return 0;
	}

	exp_tab->syms = llext_alloc(exp_tab->sym_cnt * sizeof(struct llext_symbol));
	if (!exp_tab->syms) {
		return -ENOMEM;
	}

	for (i = 0; i < exp_tab->sym_cnt; i++, sym++) {
		/*
		 * Offsets in objects built for pre-defined addresses have to
		 * be translated to memory locations for symbol name access
		 * during dependency resolution.
		 */
		const char *name = NULL;

		if (ldr_parm->pre_located) {
			ssize_t name_offset = llext_file_offset(ldr, (uintptr_t)sym->name);

			if (name_offset > 0) {
				name = llext_peek(ldr, name_offset);
			}
		}
		if (!name) {
			name = sym->name;
		}

		exp_tab->syms[i].name = name;
		exp_tab->syms[i].addr = sym->addr;
		LOG_DBG("sym %p name %s", sym->addr, sym->name);
	}

	return 0;
}

static int llext_copy_symbols(struct llext_loader *ldr, struct llext *ext,
			      const struct llext_load_param *ldr_parm)
{
	size_t ent_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_entsize;
	size_t syms_size = ldr->sects[LLEXT_MEM_SYMTAB].sh_size;
	int sym_cnt = syms_size / sizeof(elf_sym_t);
	struct llext_symtable *sym_tab = &ext->sym_tab;
	elf_sym_t sym;
	int i, j, ret;
	size_t pos;

	for (i = 0, pos = ldr->sects[LLEXT_MEM_SYMTAB].sh_offset, j = 0;
	     i < sym_cnt;
	     i++, pos += ent_size) {
		if (!i) {
			/* A dummy entry */
			continue;
		}

		ret = llext_seek(ldr, pos);
		if (ret != 0) {
			return ret;
		}

		ret = llext_read(ldr, &sym, ent_size);
		if (ret != 0) {
			return ret;
		}

		uint32_t stt = ELF_ST_TYPE(sym.st_info);
		uint32_t stb = ELF_ST_BIND(sym.st_info);
		unsigned int shndx = sym.st_shndx;

		if ((stt == STT_FUNC || stt == STT_OBJECT) &&
		    stb == STB_GLOBAL && shndx != SHN_UNDEF) {
			const char *name = llext_symbol_name(ldr, ext, &sym);

			__ASSERT(j <= sym_tab->sym_cnt, "Miscalculated symbol number %u\n", j);

			sym_tab->syms[j].name = name;

			elf_shdr_t *shdr = ext->sect_hdrs + shndx;
			uintptr_t section_addr = shdr->sh_addr;

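			/*
			 * In ET_REL files st_value is an offset inside the
			 * symbol's section; in ET_DYN files it is already a
			 * virtual address, so the section VMA must only be
			 * added (or subtracted) in the matching case below.
			 */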
			if (ldr_parm->pre_located &&
			    (!ldr_parm->section_detached || !ldr_parm->section_detached(shdr))) {
				sym_tab->syms[j].addr = (uint8_t *)sym.st_value +
					(ldr->hdr.e_type == ET_REL ? section_addr : 0);
			} else {
				const void *base;

				base = llext_loaded_sect_ptr(ldr, ext, shndx);
				if (!base) {
					/* If the section is not mapped, try to peek.
					 * Be noisy about it, since this is addressing
					 * data that was missed by llext_map_sections.
					 */
					base = llext_peek(ldr, shdr->sh_offset);
					if (base) {
						LOG_DBG("section %d peeked at %p", shndx, base);
					} else {
						LOG_ERR("No data for section %d", shndx);
						return -ENOTSUP;
					}
				}

				sym_tab->syms[j].addr = (uint8_t *)base + sym.st_value -
					(ldr->hdr.e_type == ET_REL ? 0 : section_addr);
			}

			LOG_DBG("global symbol %d name %s addr %p",
				j, name, sym_tab->syms[j].addr);
			j++;
		}
	}

	return 0;
}

/*
 * Load a valid ELF as an extension
 */
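/*
 * The sequence below mirrors the helpers above: parse and validate the ELF,
 * locate its tables, copy the string tables, group sections into regions and
 * copy them, then build the symbol tables and, optionally, link and export
 * symbols.
 */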
int do_llext_load(struct llext_loader *ldr, struct llext *ext,
		  const struct llext_load_param *ldr_parm)
{
	const struct llext_load_param default_ldr_parm = LLEXT_LOAD_PARAM_DEFAULT;
	int ret;

	if (!ldr_parm) {
		ldr_parm = &default_ldr_parm;
	}

	/* Zero all memory that is affected by the loading process
	 * (see the NOTICE at the top of this file).
	 */
	memset(ext, 0, sizeof(*ext));
	ldr->sect_map = NULL;

	LOG_DBG("Loading ELF data...");
	ret = llext_prepare(ldr);
	if (ret != 0) {
		LOG_ERR("Failed to prepare the loader, ret %d", ret);
		goto out;
	}

	ret = llext_load_elf_data(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to load basic ELF data, ret %d", ret);
		goto out;
	}

#ifdef CONFIG_USERSPACE
	ret = k_mem_domain_init(&ext->mem_domain, 0, NULL);
	if (ret != 0) {
		LOG_ERR("Failed to initialize extension memory domain, ret %d", ret);
		goto out;
	}
#endif

	LOG_DBG("Finding ELF tables...");
	ret = llext_find_tables(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to find important ELF tables, ret %d", ret);
		goto out;
	}

	LOG_DBG("Allocating and copying strings...");
	ret = llext_copy_strings(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to copy ELF string sections, ret %d", ret);
		goto out;
	}

	LOG_DBG("Mapping ELF sections...");
	ret = llext_map_sections(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to map ELF sections, ret %d", ret);
		goto out;
	}

	LOG_DBG("Allocating and copying regions...");
	ret = llext_copy_regions(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to copy regions, ret %d", ret);
		goto out;
	}

	LOG_DBG("Counting exported symbols...");
	ret = llext_count_export_syms(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to count exported ELF symbols, ret %d", ret);
		goto out;
	}

	LOG_DBG("Allocating memory for symbol table...");
	ret = llext_allocate_symtab(ldr, ext);
	if (ret != 0) {
		LOG_ERR("Failed to allocate extension symbol table, ret %d", ret);
		goto out;
	}

	LOG_DBG("Copying symbols...");
	ret = llext_copy_symbols(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to copy symbols, ret %d", ret);
		goto out;
	}

	if (ldr_parm->relocate_local) {
		LOG_DBG("Linking ELF...");
		ret = llext_link(ldr, ext, ldr_parm);
		if (ret != 0) {
			LOG_ERR("Failed to link, ret %d", ret);
			goto out;
		}
	}

	ret = llext_export_symbols(ldr, ext, ldr_parm);
	if (ret != 0) {
		LOG_ERR("Failed to export symbols, ret %d", ret);
		goto out;
	}

	if (!ldr_parm->pre_located) {
		llext_adjust_mmu_permissions(ext);
	}

out:
	/*
	 * Free resources only used during loading, unless explicitly requested
	 * otherwise. Note that this exploits the fact that freeing a NULL
	 * pointer has no effect.
	 */

	if (ret != 0 || !ldr_parm->keep_section_info) {
		llext_free_inspection_data(ldr, ext);
	}

	/* Until proper inter-llext linking is implemented, the symbol table is
	 * not useful outside of the loading process; keep it only if debugging
	 * is enabled and no error is detected.
	 */
	if (!(IS_ENABLED(CONFIG_LLEXT_LOG_LEVEL_DBG) && ret == 0)) {
		llext_free(ext->sym_tab.syms);
		ext->sym_tab.sym_cnt = 0;
		ext->sym_tab.syms = NULL;
	}

	if (ret != 0) {
		LOG_DBG("Failed to load extension: %d", ret);

		/* Since the loading process failed, free the resources that
		 * were allocated for the lifetime of the extension as well,
		 * such as regions and exported symbols.
		 */
		llext_free_regions(ext);
		llext_free(ext->exp_tab.syms);
		ext->exp_tab.sym_cnt = 0;
		ext->exp_tab.syms = NULL;
	} else {
		LOG_DBG("loaded module, .text at %p, .rodata at %p", ext->mem[LLEXT_MEM_TEXT],
			ext->mem[LLEXT_MEM_RODATA]);
	}

	llext_finalize(ldr);

	return ret;
}

int llext_free_inspection_data(struct llext_loader *ldr, struct llext *ext)
{
	if (ldr->sect_map) {
		ext->alloc_size -= ext->sect_cnt * sizeof(ldr->sect_map[0]);
		llext_free(ldr->sect_map);
		ldr->sect_map = NULL;
	}

	return 0;
}