/*
 * Copyright (c) 2023 Intel Corporation
 * Copyright (c) 2024 Arduino SA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/sys/util.h>
#include <zephyr/llext/loader.h>
#include <zephyr/llext/llext.h>
#include <zephyr/kernel.h>
#include <zephyr/cache.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);

#include <string.h>

#include "llext_priv.h"

#ifdef CONFIG_MMU_PAGE_SIZE
#define LLEXT_PAGE_SIZE CONFIG_MMU_PAGE_SIZE
#else
/* Arm's MPU wants a 32 byte minimum MPU region */
#define LLEXT_PAGE_SIZE 32
#endif

K_HEAP_DEFINE(llext_heap, CONFIG_LLEXT_HEAP_SIZE * 1024);
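
/*
 * All extension memory is carved out of llext_heap, defined above. Note that
 * CONFIG_LLEXT_HEAP_SIZE is expressed in KiB, hence the multiplication by
 * 1024. As a minimal sketch, an allocation helper over this heap could look
 * like the following (the real helpers live in llext_priv.h and their exact
 * signatures may differ):
 *
 *	void *llext_aligned_alloc(size_t align, size_t bytes)
 *	{
 *		return k_heap_aligned_alloc(&llext_heap, align, bytes, K_NO_WAIT);
 *	}
 */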

/*
 * Initialize the memory partition associated with the specified memory region
 */
static void llext_init_mem_part(struct llext *ext, enum llext_mem mem_idx,
			uintptr_t start, size_t len)
{
#ifdef CONFIG_USERSPACE
	if (mem_idx < LLEXT_MEM_PARTITIONS) {
		ext->mem_parts[mem_idx].start = start;
		ext->mem_parts[mem_idx].size = len;

		switch (mem_idx) {
		case LLEXT_MEM_TEXT:
			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RX_U_RX;
			break;
		case LLEXT_MEM_DATA:
		case LLEXT_MEM_BSS:
			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RW_U_RW;
			break;
		case LLEXT_MEM_RODATA:
			ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RO_U_RO;
			break;
		default:
			break;
		}
	}
#endif

	LOG_DBG("region %d: start %#zx, size %zd", mem_idx, (size_t)start, len);
}
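
/*
 * Note: the partitions initialized above only take effect on userspace
 * access once the caller registers them with a memory domain through
 * llext_add_domain() at the bottom of this file.
 */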

static int llext_copy_region(struct llext_loader *ldr, struct llext *ext,
			      enum llext_mem mem_idx, const struct llext_load_param *ldr_parm)
{
	int ret;
	elf_shdr_t *region = ldr->sects + mem_idx;
	uintptr_t region_alloc = region->sh_size;
	uintptr_t region_align = region->sh_addralign;

	if (!region_alloc) {
		return 0;
	}
	ext->mem_size[mem_idx] = region_alloc;

	if (IS_ENABLED(CONFIG_LLEXT_STORAGE_WRITABLE)) {
		/*
		 * Try to reuse data areas from the ELF buffer, if possible.
		 * If any of the following tests fail, a normal allocation
		 * will be attempted.
		 */
		if (region->sh_type != SHT_NOBITS) {
			/* Region has data in the file, check if peek() is supported */
			ext->mem[mem_idx] = llext_peek(ldr, region->sh_offset);
			if (ext->mem[mem_idx]) {
				if (IS_ALIGNED(ext->mem[mem_idx], region_align) ||
				    ldr_parm->pre_located) {
					/* Map this region directly to the ELF buffer */
					llext_init_mem_part(ext, mem_idx,
							    (uintptr_t)ext->mem[mem_idx],
							    region_alloc);
					ext->mem_on_heap[mem_idx] = false;
					return 0;
				}

				LOG_WRN("Cannot peek region %d: %p not aligned to %#zx",
					mem_idx, ext->mem[mem_idx], (size_t)region_align);
			}
		} else if (ldr_parm->pre_located) {
			/*
			 * In pre-located files all regions, including BSS,
			 * are placed by the user with a linker script. No
			 * additional memory allocation is needed here.
			 */
			ext->mem[mem_idx] = NULL;
			ext->mem_on_heap[mem_idx] = false;
			return 0;
		}
	}
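
	/*
	 * llext_peek() only succeeds for loaders that can expose their
	 * backing store directly. As a sketch, a buffer-based loader might
	 * implement it roughly as follows (hypothetical field names):
	 *
	 *	static void *buf_peek(struct llext_loader *l, size_t pos)
	 *	{
	 *		struct llext_buf_loader *buf_l =
	 *			CONTAINER_OF(l, struct llext_buf_loader, loader);
	 *
	 *		return (void *)(buf_l->buf + pos);
	 *	}
	 *
	 * Stream-style loaders return NULL here, forcing the copy path below.
	 */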

	if (ldr_parm->pre_located) {
		/*
		 * The ELF file is supposed to be pre-located, but some
		 * regions are not accessible or not in the correct place.
		 */
		return -EFAULT;
	}

	/*
	 * Calculate the desired region size and alignment for a new allocation.
	 */
	if (IS_ENABLED(CONFIG_ARM_MPU)) {
		/* On ARM with an MPU, regions must be sized and aligned to the same
		 * power of two, no smaller than LLEXT_PAGE_SIZE.
		 */
		uintptr_t block_size = MAX(MAX(region_alloc, region_align), LLEXT_PAGE_SIZE);

		block_size = 1 << LOG2CEIL(block_size); /* round up to the next power of two */
		region_alloc = block_size;
		region_align = block_size;
	} else {
		/* Otherwise, round the allocation size up to a multiple of LLEXT_PAGE_SIZE. */
		region_alloc = ROUND_UP(region_alloc, LLEXT_PAGE_SIZE);
		region_align = MAX(region_align, LLEXT_PAGE_SIZE);
	}
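
	/*
	 * Worked example for the MPU path: a region with sh_size = 1000 and
	 * sh_addralign = 16 yields block_size = MAX(1000, 16, 32) = 1000,
	 * which LOG2CEIL rounds up to 1024; the region is then allocated as
	 * 1024 bytes aligned to a 1024-byte boundary.
	 */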

	ext->mem[mem_idx] = llext_aligned_alloc(region_align, region_alloc);
	if (!ext->mem[mem_idx]) {
		LOG_ERR("Failed allocating %zd bytes %zd-aligned for region %d",
			(size_t)region_alloc, (size_t)region_align, mem_idx);
		return -ENOMEM;
	}

	ext->alloc_size += region_alloc;

	llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
		region_alloc);

	if (region->sh_type == SHT_NOBITS) {
		memset(ext->mem[mem_idx], 0, region->sh_size);
	} else {
		uintptr_t base = (uintptr_t)ext->mem[mem_idx];
		size_t offset = region->sh_offset;
		size_t length = region->sh_size;

		if (region->sh_flags & SHF_ALLOC) {
			/* zero out any prepad bytes, which are not part of the data area */
			size_t prepad = region->sh_info;

			memset((void *)base, 0, prepad);
			base += prepad;
			offset += prepad;
			length -= prepad;
		}

		/* copy the actual data area, without the prepad bytes */
		ret = llext_seek(ldr, offset);
		if (ret != 0) {
			goto err;
		}

		ret = llext_read(ldr, (void *)base, length);
		if (ret != 0) {
			goto err;
		}
	}

	ext->mem_on_heap[mem_idx] = true;

	return 0;

err:
	llext_free(ext->mem[mem_idx]);
	ext->mem[mem_idx] = NULL;
	return ret;
}
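
/*
 * Note on sh_info above: for the generated region headers, sh_info holds the
 * number of prepad bytes between the start of the region and the start of its
 * first section's data. The copy loop skips that span in the file and
 * zero-fills it in memory instead of copying it.
 */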

int llext_copy_strings(struct llext_loader *ldr, struct llext *ext,
		       const struct llext_load_param *ldr_parm)
{
	int ret = llext_copy_region(ldr, ext, LLEXT_MEM_SHSTRTAB, ldr_parm);

	if (!ret) {
		ret = llext_copy_region(ldr, ext, LLEXT_MEM_STRTAB, ldr_parm);
	}

	return ret;
}

int llext_copy_regions(struct llext_loader *ldr, struct llext *ext,
		       const struct llext_load_param *ldr_parm)
{
	for (enum llext_mem mem_idx = 0; mem_idx < LLEXT_MEM_COUNT; mem_idx++) {
		/* strings have already been copied */
		if (ext->mem[mem_idx]) {
			continue;
		}

		int ret = llext_copy_region(ldr, ext, mem_idx, ldr_parm);

		if (ret < 0) {
			return ret;
		}
	}

	if (IS_ENABLED(CONFIG_LLEXT_LOG_LEVEL_DBG)) {
		LOG_DBG("gdb add-symbol-file flags:");
		for (int i = 0; i < ext->sect_cnt; ++i) {
			elf_shdr_t *shdr = ext->sect_hdrs + i;
			enum llext_mem mem_idx = ldr->sect_map[i].mem_idx;
			const char *name = llext_section_name(ldr, ext, shdr);

			/* only show sections mapped to program memory */
			if (mem_idx < LLEXT_MEM_EXPORT) {
				LOG_DBG("-s %s %#zx", name,
					(size_t)ext->mem[mem_idx] + ldr->sect_map[i].offset);
			}
		}
	}

	return 0;
}
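
/*
 * The debug output above can be pasted into a gdb session to get source-level
 * debugging of a loaded extension. A hypothetical session (file name and
 * addresses made up for the example) might look like:
 *
 *	(gdb) add-symbol-file hello_world.llext \
 *		-s .text 0x20001000 -s .rodata 0x20002000
 *
 * where each "-s <section> <address>" pair comes from one logged line.
 */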

void llext_adjust_mmu_permissions(struct llext *ext)
{
#ifdef CONFIG_MMU
	void *addr;
	size_t size;
	uint32_t flags;

	for (enum llext_mem mem_idx = 0; mem_idx < LLEXT_MEM_PARTITIONS; mem_idx++) {
		addr = ext->mem[mem_idx];
		size = ROUND_UP(ext->mem_size[mem_idx], LLEXT_PAGE_SIZE);
		if (size == 0) {
			continue;
		}
		switch (mem_idx) {
		case LLEXT_MEM_TEXT:
			sys_cache_instr_invd_range(addr, size);
			flags = K_MEM_PERM_EXEC;
			break;
		case LLEXT_MEM_DATA:
		case LLEXT_MEM_BSS:
			/* memory is already K_MEM_PERM_RW by default */
			continue;
		case LLEXT_MEM_RODATA:
			flags = 0;
			break;
		default:
			continue;
		}
		sys_cache_data_flush_range(addr, size);
		k_mem_update_flags(addr, size, flags);
	}

	ext->mmu_permissions_set = true;
#endif
}
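
/*
 * Ordering note for the loop above: the data cache is flushed before
 * permissions are tightened, so that freshly copied code and constants reach
 * memory while the pages are still writable; the instruction cache is
 * invalidated for text regions so the CPU does not execute stale
 * instructions cached from before the copy.
 */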

void llext_free_regions(struct llext *ext)
{
	for (int i = 0; i < LLEXT_MEM_COUNT; i++) {
#ifdef CONFIG_MMU
		if (ext->mmu_permissions_set && ext->mem_size[i] != 0) {
			/* restore default RAM permissions */
			k_mem_update_flags(ext->mem[i],
					   ROUND_UP(ext->mem_size[i], LLEXT_PAGE_SIZE),
					   K_MEM_PERM_RW);
		}
#endif
		if (ext->mem_on_heap[i]) {
			LOG_DBG("freeing memory region %d", i);
			llext_free(ext->mem[i]);
			ext->mem[i] = NULL;
		}
	}
}

int llext_add_domain(struct llext *ext, struct k_mem_domain *domain)
{
#ifdef CONFIG_USERSPACE
	int ret = 0;

	for (int i = 0; i < LLEXT_MEM_PARTITIONS; i++) {
		if (ext->mem_size[i] == 0) {
			continue;
		}
		ret = k_mem_domain_add_partition(domain, &ext->mem_parts[i]);
		if (ret != 0) {
			LOG_ERR("Failed adding memory partition %d to domain %p",
				i, domain);
			return ret;
		}
	}

	return ret;
#else
	return -ENOSYS;
#endif
}
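
/*
 * A minimal usage sketch for the userspace path (identifiers other than the
 * llext and kernel APIs are made up for the example):
 *
 *	struct k_mem_domain ext_domain;
 *
 *	k_mem_domain_init(&ext_domain, 0, NULL);
 *	if (llext_add_domain(ext, &ext_domain) == 0) {
 *		k_mem_domain_add_thread(&ext_domain, user_thread);
 *	}
 *
 * After this, user_thread can access the extension's memory regions with the
 * permissions set up in llext_init_mem_part().
 */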