1 /*
2 * Copyright (c) 2023 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 */
7
8 #include <zephyr/sys/util.h>
9 #include <zephyr/llext/elf.h>
10 #include <zephyr/llext/loader.h>
11 #include <zephyr/llext/llext.h>
12 #include <zephyr/kernel.h>
13 #include <zephyr/cache.h>
14
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_REGISTER(llext, CONFIG_LLEXT_LOG_LEVEL);
17
18 #include <string.h>
19
20 #include "llext_priv.h"
21
22 static sys_slist_t _llext_list = SYS_SLIST_STATIC_INIT(&_llext_list);
23
24 static struct k_mutex llext_lock = Z_MUTEX_INITIALIZER(llext_lock);
25
llext_find_section(struct llext_loader * ldr,const char * search_name)26 ssize_t llext_find_section(struct llext_loader *ldr, const char *search_name)
27 {
28 /* Note that this API is used after llext_load(), so the ldr->sect_hdrs
29 * cache is already freed. A direct search covers all situations.
30 */
31
32 elf_shdr_t *shdr;
33 unsigned int i;
34 size_t pos;
35
36 for (i = 0, pos = ldr->hdr.e_shoff;
37 i < ldr->hdr.e_shnum;
38 i++, pos += ldr->hdr.e_shentsize) {
39 shdr = llext_peek(ldr, pos);
40 if (!shdr) {
41 /* The peek() method isn't supported */
42 return -ENOTSUP;
43 }
44
45 const char *name = llext_peek(ldr,
46 ldr->sects[LLEXT_MEM_SHSTRTAB].sh_offset +
47 shdr->sh_name);
48
49 if (!strcmp(name, search_name)) {
50 return shdr->sh_offset;
51 }
52 }
53
54 return -ENOENT;
55 }
56
57 /*
58 * Note, that while we protect the global llext list while searching, we release
59 * the lock before returning the found extension to the caller. Therefore it's
60 * a responsibility of the caller to protect against races with a freeing
61 * context when calling this function.
62 */
llext_by_name(const char * name)63 struct llext *llext_by_name(const char *name)
64 {
65 k_mutex_lock(&llext_lock, K_FOREVER);
66
67 for (sys_snode_t *node = sys_slist_peek_head(&_llext_list);
68 node != NULL;
69 node = sys_slist_peek_next(node)) {
70 struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list);
71
72 if (strncmp(ext->name, name, sizeof(ext->name)) == 0) {
73 k_mutex_unlock(&llext_lock);
74 return ext;
75 }
76 }
77
78 k_mutex_unlock(&llext_lock);
79 return NULL;
80 }
81
llext_iterate(int (* fn)(struct llext * ext,void * arg),void * arg)82 int llext_iterate(int (*fn)(struct llext *ext, void *arg), void *arg)
83 {
84 sys_snode_t *node;
85 unsigned int i;
86 int ret = 0;
87
88 k_mutex_lock(&llext_lock, K_FOREVER);
89
90 for (node = sys_slist_peek_head(&_llext_list), i = 0;
91 node;
92 node = sys_slist_peek_next(node), i++) {
93 struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list);
94
95 ret = fn(ext, arg);
96 if (ret) {
97 break;
98 }
99 }
100
101 k_mutex_unlock(&llext_lock);
102 return ret;
103 }
104
llext_find_sym(const struct llext_symtable * sym_table,const char * sym_name)105 const void *llext_find_sym(const struct llext_symtable *sym_table, const char *sym_name)
106 {
107 if (sym_table == NULL) {
108 /* Built-in symbol table */
109 #ifdef CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID
110 /* 'sym_name' is actually a SLID to search for */
111 uintptr_t slid = (uintptr_t)sym_name;
112
113 /* TODO: perform a binary search instead of linear.
114 * Note that - as of writing - the llext_const_symbol_area
115 * section is sorted in ascending SLID order.
116 * (see scripts/build/llext_prepare_exptab.py)
117 */
118 STRUCT_SECTION_FOREACH(llext_const_symbol, sym) {
119 if (slid == sym->slid) {
120 return sym->addr;
121 }
122 }
123 #else
124 STRUCT_SECTION_FOREACH(llext_const_symbol, sym) {
125 if (strcmp(sym->name, sym_name) == 0) {
126 return sym->addr;
127 }
128 }
129 #endif
130 } else {
131 /* find symbols in module */
132 for (size_t i = 0; i < sym_table->sym_cnt; i++) {
133 if (strcmp(sym_table->syms[i].name, sym_name) == 0) {
134 return sym_table->syms[i].addr;
135 }
136 }
137 }
138
139 return NULL;
140 }
141
llext_load(struct llext_loader * ldr,const char * name,struct llext ** ext,struct llext_load_param * ldr_parm)142 int llext_load(struct llext_loader *ldr, const char *name, struct llext **ext,
143 struct llext_load_param *ldr_parm)
144 {
145 int ret;
146
147 *ext = llext_by_name(name);
148
149 k_mutex_lock(&llext_lock, K_FOREVER);
150
151 if (*ext) {
152 /* The use count is at least 1 */
153 ret = (*ext)->use_count++;
154 goto out;
155 }
156
157 *ext = llext_alloc(sizeof(struct llext));
158 if (*ext == NULL) {
159 LOG_ERR("Not enough memory for extension metadata");
160 ret = -ENOMEM;
161 goto out;
162 }
163
164 ret = do_llext_load(ldr, *ext, ldr_parm);
165 if (ret < 0) {
166 llext_free(*ext);
167 *ext = NULL;
168 goto out;
169 }
170
171 strncpy((*ext)->name, name, sizeof((*ext)->name));
172 (*ext)->name[sizeof((*ext)->name) - 1] = '\0';
173 (*ext)->use_count++;
174
175 sys_slist_append(&_llext_list, &(*ext)->_llext_list);
176 LOG_INF("Loaded extension %s", (*ext)->name);
177
178 out:
179 k_mutex_unlock(&llext_lock);
180 return ret;
181 }
182
/*
 * Drop one reference to the extension. When the last reference is dropped,
 * the extension is unlinked from the global list and all its memory is
 * freed; *ext is set to NULL in that case.
 *
 * Returns 0 when the extension was actually freed, or the remaining
 * (non-zero) use count when other users still hold it.
 */
int llext_unload(struct llext **ext)
{
	__ASSERT(*ext, "Expected non-null extension");
	struct llext *tmp = *ext;

	k_mutex_lock(&llext_lock, K_FOREVER);
	__ASSERT(tmp->use_count, "A valid LLEXT cannot have a zero use-count!");

	if (tmp->use_count-- != 1) {
		/* Still referenced elsewhere: report the remaining count */
		unsigned int ret = tmp->use_count;

		k_mutex_unlock(&llext_lock);
		return ret;
	}

	/* Last reference: unlink from the global list while still holding
	 * llext_lock, which protects the list.
	 */
	sys_slist_find_and_remove(&_llext_list, &tmp->_llext_list);

	*ext = NULL;
	k_mutex_unlock(&llext_lock);

	/* Freeing can happen outside the lock: the extension is no longer
	 * reachable through the list at this point.
	 */
	llext_free_regions(tmp);
	llext_free(tmp->sym_tab.syms);
	llext_free(tmp->exp_tab.syms);
	llext_free(tmp);

	return 0;
}
211
llext_call_fn(struct llext * ext,const char * sym_name)212 int llext_call_fn(struct llext *ext, const char *sym_name)
213 {
214 void (*fn)(void);
215
216 fn = llext_find_sym(&ext->exp_tab, sym_name);
217 if (fn == NULL) {
218 return -ENOENT;
219 }
220 fn();
221
222 return 0;
223 }
224