1 /*
2  * Copyright (c) 2023 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  */
7 
8 #include <zephyr/sys/util.h>
9 #include <zephyr/llext/elf.h>
10 #include <zephyr/llext/loader.h>
11 #include <zephyr/llext/llext.h>
12 #include <zephyr/kernel.h>
13 #include <zephyr/cache.h>
14 
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_REGISTER(llext, CONFIG_LLEXT_LOG_LEVEL);
17 
18 #include <string.h>
19 
20 #include "llext_priv.h"
21 
/* Global list of all currently loaded extensions; protected by llext_lock */
static sys_slist_t _llext_list = SYS_SLIST_STATIC_INIT(&_llext_list);

/* Serializes access to _llext_list and to per-extension use counts */
static struct k_mutex llext_lock = Z_MUTEX_INITIALIZER(llext_lock);
25 
/*
 * Look up a section header of @a ext by name.
 *
 * Walks the extension's cached section header table and resolves each
 * entry's name through the section-header string table, comparing it
 * against @a search_name. On a match the header is copied to @a shdr.
 *
 * @return 0 on success, -ENOTSUP if a section name cannot be peeked,
 *         -ENOENT if no section with that name exists.
 */
int llext_get_section_header(struct llext_loader *ldr, struct llext *ext, const char *search_name,
			     elf_shdr_t *shdr)
{
	for (unsigned int idx = 0; idx < ext->sect_cnt; idx++) {
		const elf_shdr_t *candidate = &ext->sect_hdrs[idx];
		const char *sect_name = llext_peek(ldr,
						   ldr->sects[LLEXT_MEM_SHSTRTAB].sh_offset +
						   candidate->sh_name);

		if (sect_name == NULL) {
			return -ENOTSUP;
		}

		if (strcmp(sect_name, search_name) == 0) {
			*shdr = *candidate;
			return 0;
		}
	}

	return -ENOENT;
}
51 
llext_find_section(struct llext_loader * ldr,const char * search_name)52 ssize_t llext_find_section(struct llext_loader *ldr, const char *search_name)
53 {
54 	elf_shdr_t *shdr;
55 	unsigned int i;
56 	size_t pos;
57 
58 	for (i = 0, pos = ldr->hdr.e_shoff;
59 	     i < ldr->hdr.e_shnum;
60 	     i++, pos += ldr->hdr.e_shentsize) {
61 		shdr = llext_peek(ldr, pos);
62 		if (!shdr) {
63 			/* The peek() method isn't supported */
64 			return -ENOTSUP;
65 		}
66 
67 		const char *name = llext_peek(ldr,
68 					      ldr->sects[LLEXT_MEM_SHSTRTAB].sh_offset +
69 					      shdr->sh_name);
70 
71 		if (!strcmp(name, search_name)) {
72 			return shdr->sh_offset;
73 		}
74 	}
75 
76 	return -ENOENT;
77 }
78 
79 /*
80  * Note, that while we protect the global llext list while searching, we release
81  * the lock before returning the found extension to the caller. Therefore it's
82  * a responsibility of the caller to protect against races with a freeing
83  * context when calling this function.
84  */
llext_by_name(const char * name)85 struct llext *llext_by_name(const char *name)
86 {
87 	k_mutex_lock(&llext_lock, K_FOREVER);
88 
89 	for (sys_snode_t *node = sys_slist_peek_head(&_llext_list);
90 	     node != NULL;
91 	     node = sys_slist_peek_next(node)) {
92 		struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list);
93 
94 		if (strncmp(ext->name, name, sizeof(ext->name)) == 0) {
95 			k_mutex_unlock(&llext_lock);
96 			return ext;
97 		}
98 	}
99 
100 	k_mutex_unlock(&llext_lock);
101 	return NULL;
102 }
103 
llext_iterate(int (* fn)(struct llext * ext,void * arg),void * arg)104 int llext_iterate(int (*fn)(struct llext *ext, void *arg), void *arg)
105 {
106 	sys_snode_t *node;
107 	int ret = 0;
108 
109 	k_mutex_lock(&llext_lock, K_FOREVER);
110 
111 	for (node = sys_slist_peek_head(&_llext_list);
112 	     node;
113 	     node = sys_slist_peek_next(node)) {
114 		struct llext *ext = CONTAINER_OF(node, struct llext, _llext_list);
115 
116 		ret = fn(ext, arg);
117 		if (ret) {
118 			break;
119 		}
120 	}
121 
122 	k_mutex_unlock(&llext_lock);
123 	return ret;
124 }
125 
llext_find_sym(const struct llext_symtable * sym_table,const char * sym_name)126 const void *llext_find_sym(const struct llext_symtable *sym_table, const char *sym_name)
127 {
128 	if (sym_table == NULL) {
129 		/* Built-in symbol table */
130 #ifdef CONFIG_LLEXT_EXPORT_BUILTINS_BY_SLID
131 		/* 'sym_name' is actually a SLID to search for */
132 		uintptr_t slid = (uintptr_t)sym_name;
133 
134 		/* TODO: perform a binary search instead of linear.
135 		 * Note that - as of writing - the llext_const_symbol_area
136 		 * section is sorted in ascending SLID order.
137 		 * (see scripts/build/llext_prepare_exptab.py)
138 		 */
139 		STRUCT_SECTION_FOREACH(llext_const_symbol, sym) {
140 			if (slid == sym->slid) {
141 				return sym->addr;
142 			}
143 		}
144 #else
145 		STRUCT_SECTION_FOREACH(llext_const_symbol, sym) {
146 			if (strcmp(sym->name, sym_name) == 0) {
147 				return sym->addr;
148 			}
149 		}
150 #endif
151 	} else {
152 		/* find symbols in module */
153 		for (size_t i = 0; i < sym_table->sym_cnt; i++) {
154 			if (strcmp(sym_table->syms[i].name, sym_name) == 0) {
155 				return sym_table->syms[i].addr;
156 			}
157 		}
158 	}
159 
160 	return NULL;
161 }
162 
/*
 * Load an extension from @a ldr, or take an extra reference if one with
 * this name is already loaded.
 *
 * @return a negative errno on failure, 0 when the extension was newly
 *         loaded, or the previous use count (>= 1) when it already existed.
 */
int llext_load(struct llext_loader *ldr, const char *name, struct llext **ext,
	       const struct llext_load_param *ldr_parm)
{
	int ret;

	/*
	 * NOTE(review): llext_by_name() takes and releases llext_lock
	 * internally, so there is a window before the lock below is
	 * re-acquired in which the found extension could be freed — confirm
	 * callers serialize load/unload of the same extension.
	 */
	*ext = llext_by_name(name);

	k_mutex_lock(&llext_lock, K_FOREVER);

	if (*ext) {
		/* The use count is at least 1; return its previous value */
		ret = (*ext)->use_count++;
		goto out;
	}

	*ext = llext_alloc(sizeof(struct llext));
	if (*ext == NULL) {
		LOG_ERR("Not enough memory for extension metadata");
		ret = -ENOMEM;
		goto out;
	}

	ret = do_llext_load(ldr, *ext, ldr_parm);
	if (ret < 0) {
		/* Loading failed: release the metadata and report the error */
		llext_free(*ext);
		*ext = NULL;
		goto out;
	}

	/* Record the name, guaranteeing NUL-termination on truncation */
	strncpy((*ext)->name, name, sizeof((*ext)->name));
	(*ext)->name[sizeof((*ext)->name) - 1] = '\0';
	(*ext)->use_count++;

	sys_slist_append(&_llext_list, &(*ext)->_llext_list);
	LOG_INF("Loaded extension %s", (*ext)->name);

out:
	k_mutex_unlock(&llext_lock);
	return ret;
}
203 
204 #include <zephyr/logging/log_ctrl.h>
205 
/*
 * In deferred logging mode, get queued log messages processed before an
 * extension is unloaded, since queued messages may reference extension
 * memory that is about to be freed.
 *
 * To let the logging thread run, temporarily raise its priority above
 * the current thread's (or lower the current thread when it already has
 * the highest possible priority), trigger processing and yield, then
 * restore the original priorities.
 */
static void llext_log_flush(void)
{
#ifdef CONFIG_LOG_MODE_DEFERRED
	extern struct k_thread logging_thread;
	int cur_prio = k_thread_priority_get(k_current_get());
	int log_prio = k_thread_priority_get(&logging_thread);
	int target_prio;
	bool adjust_cur, adjust_log;

	/*
	 * Our goal is to raise the logger thread priority above current, but if
	 * current has the highest possible priority, both need to be adjusted,
	 * particularly if the logger thread has the lowest possible priority
	 */
	if (log_prio < cur_prio) {
		/* Logger already preempts us: nothing to adjust */
		adjust_cur = false;
		adjust_log = false;
		target_prio = 0;
	} else if (cur_prio == K_HIGHEST_THREAD_PRIO) {
		/* Cannot raise the logger above us, so lower ourselves too */
		adjust_cur = true;
		adjust_log = true;
		target_prio = cur_prio;
		k_thread_priority_set(k_current_get(), cur_prio + 1);
	} else {
		adjust_cur = false;
		adjust_log = true;
		target_prio = cur_prio - 1;
	}

	/* adjust logging thread priority if needed */
	if (adjust_log) {
		k_thread_priority_set(&logging_thread, target_prio);
	}

	log_thread_trigger();
	k_yield();

	if (adjust_log) {
		k_thread_priority_set(&logging_thread, log_prio);
	}
	if (adjust_cur) {
		/*
		 * Bug fix: restore the *current* thread's original priority.
		 * The previous code re-adjusted the logging thread here,
		 * leaving the current thread demoted and clobbering the
		 * logger's just-restored priority.
		 */
		k_thread_priority_set(k_current_get(), cur_prio);
	}
#endif
}
251 
/*
 * Drop one reference to the extension. Only when the use count reaches
 * zero is the extension removed from the global list and all its memory
 * released; *ext is set to NULL in that case.
 *
 * @return the remaining use count (> 0) if other users still hold the
 *         extension, or 0 once it has been fully unloaded and freed.
 */
int llext_unload(struct llext **ext)
{
	__ASSERT(*ext, "Expected non-null extension");
	struct llext *tmp = *ext;

	k_mutex_lock(&llext_lock, K_FOREVER);

	/* Drain deferred log messages that may reference extension memory */
	llext_log_flush();

	__ASSERT(tmp->use_count, "A valid LLEXT cannot have a zero use-count!");

	if (tmp->use_count-- != 1) {
		/* Still referenced elsewhere: report the remaining count */
		unsigned int ret = tmp->use_count;

		k_mutex_unlock(&llext_lock);
		return ret;
	}

	/* FIXME: protect the global list */
	sys_slist_find_and_remove(&_llext_list, &tmp->_llext_list);

	llext_dependency_remove_all(tmp);

	*ext = NULL;
	k_mutex_unlock(&llext_lock);

	/*
	 * Freeing happens outside the lock; the extension is no longer
	 * reachable through the global list at this point.
	 */
	if (tmp->sect_hdrs_on_heap) {
		llext_free(tmp->sect_hdrs);
	}

	llext_free_regions(tmp);
	llext_free(tmp->sym_tab.syms);
	llext_free(tmp->exp_tab.syms);
	llext_free(tmp);

	return 0;
}
289 
llext_call_fn(struct llext * ext,const char * sym_name)290 int llext_call_fn(struct llext *ext, const char *sym_name)
291 {
292 	void (*fn)(void);
293 
294 	fn = llext_find_sym(&ext->exp_tab, sym_name);
295 	if (fn == NULL) {
296 		return -ENOENT;
297 	}
298 	fn();
299 
300 	return 0;
301 }
302 
call_fn_table(struct llext * ext,bool is_init)303 static int call_fn_table(struct llext *ext, bool is_init)
304 {
305 	ssize_t ret;
306 
307 	ret = llext_get_fn_table(ext, is_init, NULL, 0);
308 	if (ret < 0) {
309 		LOG_ERR("Failed to get table size: %d", (int)ret);
310 		return ret;
311 	}
312 
313 	typedef void (*elf_void_fn_t)(void);
314 
315 	int fn_count = ret / sizeof(elf_void_fn_t);
316 	elf_void_fn_t fn_table[fn_count];
317 
318 	ret = llext_get_fn_table(ext, is_init, &fn_table, sizeof(fn_table));
319 	if (ret < 0) {
320 		LOG_ERR("Failed to get function table: %d", (int)ret);
321 		return ret;
322 	}
323 
324 	for (int i = 0; i < fn_count; i++) {
325 		LOG_DBG("calling %s function %p()",
326 			is_init ? "bringup" : "teardown", (void *)fn_table[i]);
327 		fn_table[i]();
328 	}
329 
330 	return 0;
331 }
332 
/* Run the extension's initialization (bringup) function table */
inline int llext_bringup(struct llext *ext)
{
	return call_fn_table(ext, true);
}
337 
/* Run the extension's teardown function table */
inline int llext_teardown(struct llext *ext)
{
	return call_fn_table(ext, false);
}
342 
/*
 * Full extension lifecycle driver: run init functions, invoke the entry
 * point with @a user_data, then run de-init functions. Errors from the
 * init stage skip the entry point and teardown entirely.
 */
void llext_bootstrap(struct llext *ext, llext_entry_fn_t entry_fn, void *user_data)
{
	int rc = llext_bringup(ext);

	if (rc < 0) {
		LOG_ERR("Failed to call init functions: %d", rc);
		return;
	}

	LOG_DBG("calling entry function %p(%p)", (void *)entry_fn, user_data);
	entry_fn(user_data);

	rc = llext_teardown(ext);
	if (rc < 0) {
		LOG_ERR("Failed to call de-init functions: %d", rc);
		return;
	}
}
365