/*
 * Copyright (c) 2023 Intel Corporation
 * Copyright (c) 2024 Arduino SA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
7
8 #include <zephyr/sys/util.h>
9 #include <zephyr/llext/loader.h>
10 #include <zephyr/llext/llext.h>
11 #include <zephyr/kernel.h>
12 #include <zephyr/cache.h>
13
14 #include <zephyr/logging/log.h>
15 LOG_MODULE_DECLARE(llext, CONFIG_LLEXT_LOG_LEVEL);
16
17 #include <string.h>
18
19 #include "llext_priv.h"
20
/*
 * Granularity of region allocations: with an MMU, regions must be sized
 * and aligned to a page so access permissions can be applied per region.
 */
#ifdef CONFIG_MMU_PAGE_SIZE
#define LLEXT_PAGE_SIZE CONFIG_MMU_PAGE_SIZE
#else
/* Arm's MPU wants a 32 byte minimum mpu region */
#define LLEXT_PAGE_SIZE 32
#endif

/* Heap backing all extension region allocations (size configured in KiB) */
K_HEAP_DEFINE(llext_heap, CONFIG_LLEXT_HEAP_SIZE * 1024);
29
30 /*
31 * Initialize the memory partition associated with the specified memory region
32 */
llext_init_mem_part(struct llext * ext,enum llext_mem mem_idx,uintptr_t start,size_t len)33 static void llext_init_mem_part(struct llext *ext, enum llext_mem mem_idx,
34 uintptr_t start, size_t len)
35 {
36 #ifdef CONFIG_USERSPACE
37 if (mem_idx < LLEXT_MEM_PARTITIONS) {
38 ext->mem_parts[mem_idx].start = start;
39 ext->mem_parts[mem_idx].size = len;
40
41 switch (mem_idx) {
42 case LLEXT_MEM_TEXT:
43 ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RX_U_RX;
44 break;
45 case LLEXT_MEM_DATA:
46 case LLEXT_MEM_BSS:
47 ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RW_U_RW;
48 break;
49 case LLEXT_MEM_RODATA:
50 ext->mem_parts[mem_idx].attr = K_MEM_PARTITION_P_RO_U_RO;
51 break;
52 default:
53 break;
54 }
55 }
56 #endif
57
58 LOG_DBG("region %d: start 0x%zx, size %zd", mem_idx, (size_t)start, len);
59 }
60
llext_copy_section(struct llext_loader * ldr,struct llext * ext,enum llext_mem mem_idx,const struct llext_load_param * ldr_parm)61 static int llext_copy_section(struct llext_loader *ldr, struct llext *ext,
62 enum llext_mem mem_idx, const struct llext_load_param *ldr_parm)
63 {
64 int ret;
65
66 if (!ldr->sects[mem_idx].sh_size) {
67 return 0;
68 }
69 ext->mem_size[mem_idx] = ldr->sects[mem_idx].sh_size;
70
71 if (IS_ENABLED(CONFIG_LLEXT_STORAGE_WRITABLE)) {
72 if (ldr->sects[mem_idx].sh_type != SHT_NOBITS) {
73 /* Directly use data from the ELF buffer if peek() is supported */
74 ext->mem[mem_idx] = llext_peek(ldr, ldr->sects[mem_idx].sh_offset);
75 if (ext->mem[mem_idx]) {
76 llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
77 ldr->sects[mem_idx].sh_size);
78 ext->mem_on_heap[mem_idx] = false;
79 return 0;
80 }
81 } else if (ldr_parm && ldr_parm->pre_located) {
82 /*
83 * ldr_parm cannot be NULL here with the current flow, but
84 * we add a check to make it future-proof
85 */
86 ext->mem[mem_idx] = NULL;
87 ext->mem_on_heap[mem_idx] = false;
88 return 0;
89 }
90 }
91
92 if (ldr_parm && ldr_parm->pre_located) {
93 return -EFAULT;
94 }
95
96 /* On ARM with an MPU a pow(2, N)*32 sized and aligned region is needed,
97 * otherwise its typically an mmu page (sized and aligned memory region)
98 * we are after that we can assign memory permission bits on.
99 */
100 #ifndef CONFIG_ARM_MPU
101 const uintptr_t sect_alloc = ROUND_UP(ldr->sects[mem_idx].sh_size, LLEXT_PAGE_SIZE);
102 const uintptr_t sect_align = LLEXT_PAGE_SIZE;
103 #else
104 uintptr_t sect_alloc = LLEXT_PAGE_SIZE;
105
106 while (sect_alloc < ldr->sects[mem_idx].sh_size) {
107 sect_alloc *= 2;
108 }
109 uintptr_t sect_align = sect_alloc;
110 #endif
111
112 ext->mem[mem_idx] = llext_aligned_alloc(sect_align, sect_alloc);
113 if (!ext->mem[mem_idx]) {
114 return -ENOMEM;
115 }
116
117 ext->alloc_size += sect_alloc;
118
119 llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
120 sect_alloc);
121
122 if (ldr->sects[mem_idx].sh_type == SHT_NOBITS) {
123 memset(ext->mem[mem_idx], 0, ldr->sects[mem_idx].sh_size);
124 } else {
125 ret = llext_seek(ldr, ldr->sects[mem_idx].sh_offset);
126 if (ret != 0) {
127 goto err;
128 }
129
130 ret = llext_read(ldr, ext->mem[mem_idx], ldr->sects[mem_idx].sh_size);
131 if (ret != 0) {
132 goto err;
133 }
134 }
135
136 ext->mem_on_heap[mem_idx] = true;
137
138 return 0;
139
140 err:
141 llext_free(ext->mem[mem_idx]);
142 ext->mem[mem_idx] = NULL;
143 return ret;
144 }
145
llext_copy_strings(struct llext_loader * ldr,struct llext * ext)146 int llext_copy_strings(struct llext_loader *ldr, struct llext *ext)
147 {
148 int ret = llext_copy_section(ldr, ext, LLEXT_MEM_SHSTRTAB, NULL);
149
150 if (!ret) {
151 ret = llext_copy_section(ldr, ext, LLEXT_MEM_STRTAB, NULL);
152 }
153
154 return ret;
155 }
156
llext_copy_regions(struct llext_loader * ldr,struct llext * ext,const struct llext_load_param * ldr_parm)157 int llext_copy_regions(struct llext_loader *ldr, struct llext *ext,
158 const struct llext_load_param *ldr_parm)
159 {
160 for (enum llext_mem mem_idx = 0; mem_idx < LLEXT_MEM_COUNT; mem_idx++) {
161 /* strings have already been copied */
162 if (ext->mem[mem_idx]) {
163 continue;
164 }
165
166 int ret = llext_copy_section(ldr, ext, mem_idx, ldr_parm);
167
168 if (ret < 0) {
169 return ret;
170 }
171 }
172
173 return 0;
174 }
175
/*
 * With an MMU, tighten the page permissions of the loaded regions:
 * text becomes executable (after invalidating the instruction cache),
 * rodata becomes read-only. Data/BSS keep the default RW mapping.
 * No-op without CONFIG_MMU.
 */
void llext_adjust_mmu_permissions(struct llext *ext)
{
#ifdef CONFIG_MMU
	for (enum llext_mem mem_idx = 0; mem_idx < LLEXT_MEM_PARTITIONS; mem_idx++) {
		void *addr = ext->mem[mem_idx];
		size_t size = ROUND_UP(ext->mem_size[mem_idx], LLEXT_PAGE_SIZE);
		uint32_t flags;

		if (size == 0) {
			continue;
		}

		switch (mem_idx) {
		case LLEXT_MEM_TEXT:
			sys_cache_instr_invd_range(addr, size);
			flags = K_MEM_PERM_EXEC;
			break;
		case LLEXT_MEM_RODATA:
			flags = 0;
			break;
		default:
			/* data/BSS are already K_MEM_PERM_RW by default */
			continue;
		}

		/* Make the new contents visible before remapping */
		sys_cache_data_flush_range(addr, size);
		k_mem_update_flags(addr, size, flags);
	}
#endif
}
209
llext_free_regions(struct llext * ext)210 void llext_free_regions(struct llext *ext)
211 {
212 for (int i = 0; i < LLEXT_MEM_COUNT; i++) {
213 #ifdef CONFIG_MMU
214 if (ext->mem_size[i] != 0) {
215 /* restore default RAM permissions */
216 k_mem_update_flags(ext->mem[i],
217 ROUND_UP(ext->mem_size[i], LLEXT_PAGE_SIZE),
218 K_MEM_PERM_RW);
219 }
220 #endif
221 if (ext->mem_on_heap[i]) {
222 LOG_DBG("freeing memory region %d", i);
223 llext_free(ext->mem[i]);
224 ext->mem[i] = NULL;
225 }
226 }
227 }
228
llext_add_domain(struct llext * ext,struct k_mem_domain * domain)229 int llext_add_domain(struct llext *ext, struct k_mem_domain *domain)
230 {
231 #ifdef CONFIG_USERSPACE
232 int ret = 0;
233
234 for (int i = 0; i < LLEXT_MEM_PARTITIONS; i++) {
235 if (ext->mem_size[i] == 0) {
236 continue;
237 }
238 ret = k_mem_domain_add_partition(domain, &ext->mem_parts[i]);
239 if (ret != 0) {
240 LOG_ERR("Failed adding memory partition %d to domain %p",
241 i, domain);
242 return ret;
243 }
244 }
245
246 return ret;
247 #else
248 return -ENOSYS;
249 #endif
250 }
251