1 /*
2  * Copyright (c) 2017 Linaro Limited
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/init.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/kernel_structs.h>
10 #include <kernel_internal.h>
11 #include <zephyr/sys/__assert.h>
12 #include <stdbool.h>
13 #include <zephyr/spinlock.h>
14 #include <zephyr/sys/check.h>
15 #include <zephyr/sys/libc-hooks.h>
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
18 
/* Protects all memory domain and per-thread domain-membership state */
struct k_spinlock z_mem_domain_lock;
/* Per-arch partition limit, cached from arch_mem_domain_max_partitions_get()
 * at PRE_KERNEL_1 init time
 */
static uint8_t max_partitions;

/* Domain that threads belong to by default (populated during init) */
struct k_mem_domain k_mem_domain_default;
23 
check_add_partition(struct k_mem_domain * domain,struct k_mem_partition * part)24 static bool check_add_partition(struct k_mem_domain *domain,
25 				struct k_mem_partition *part)
26 {
27 
28 	int i;
29 	uintptr_t pstart, pend, dstart, dend;
30 
31 	if (part == NULL) {
32 		LOG_ERR("NULL k_mem_partition provided");
33 		return false;
34 	}
35 
36 #ifdef CONFIG_EXECUTE_XOR_WRITE
37 	/* Arches where execution cannot be disabled should always return
38 	 * false to this check
39 	 */
40 	if (K_MEM_PARTITION_IS_EXECUTABLE(part->attr) &&
41 	    K_MEM_PARTITION_IS_WRITABLE(part->attr)) {
42 		LOG_ERR("partition is writable and executable <start %lx>",
43 			part->start);
44 		return false;
45 	}
46 #endif /* CONFIG_EXECUTE_XOR_WRITE */
47 
48 	if (part->size == 0U) {
49 		LOG_ERR("zero sized partition at %p with base 0x%lx",
50 			part, part->start);
51 		return false;
52 	}
53 
54 	pstart = part->start;
55 	pend = part->start + part->size;
56 
57 	if (pend <= pstart) {
58 		LOG_ERR("invalid partition %p, wraparound detected. base 0x%lx size %zu",
59 			part, part->start, part->size);
60 		return false;
61 	}
62 
63 	/* Check that this partition doesn't overlap any existing ones already
64 	 * in the domain
65 	 */
66 	for (i = 0; i < domain->num_partitions; i++) {
67 		struct k_mem_partition *dpart = &domain->partitions[i];
68 
69 		if (dpart->size == 0U) {
70 			/* Unused slot */
71 			continue;
72 		}
73 
74 		dstart = dpart->start;
75 		dend = dstart + dpart->size;
76 
77 		if (pend > dstart && dend > pstart) {
78 			LOG_ERR("partition %p base %lx (size %zu) overlaps existing base %lx (size %zu)",
79 				part, part->start, part->size,
80 				dpart->start, dpart->size);
81 			return false;
82 		}
83 	}
84 
85 	return true;
86 }
87 
/**
 * Initialize a memory domain, optionally installing an initial set of
 * partitions.
 *
 * @param domain    Domain to initialize (must be non-NULL)
 * @param num_parts Number of entries in @p parts (must be <= max_partitions)
 * @param parts     Array of partition pointers; may be NULL only if
 *                  @p num_parts is 0
 *
 * @retval 0 on success
 * @retval -EINVAL on bad arguments or an invalid/overlapping partition
 * @retval -ENOMEM if architecture-specific domain data initialization fails
 */
int k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
		      struct k_mem_partition *parts[])
{
	k_spinlock_key_t key;
	int ret = 0;

	/* Argument checks happen before taking the domain lock */
	CHECKIF(domain == NULL) {
		ret = -EINVAL;
		goto out;
	}

	CHECKIF(!(num_parts == 0U || parts != NULL)) {
		LOG_ERR("parts array is NULL and num_parts is nonzero");
		ret = -EINVAL;
		goto out;
	}

	CHECKIF(!(num_parts <= max_partitions)) {
		LOG_ERR("num_parts of %d exceeds maximum allowable partitions (%d)",
			num_parts, max_partitions);
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&z_mem_domain_lock);

	/* Start from an empty partition table; zero size marks a free slot */
	domain->num_partitions = 0U;
	(void)memset(domain->partitions, 0, sizeof(domain->partitions));

#ifdef CONFIG_MEM_DOMAIN_HAS_THREAD_LIST
	sys_dlist_init(&domain->thread_mem_domain_list);
#endif /* CONFIG_MEM_DOMAIN_HAS_THREAD_LIST */

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
	ret = arch_mem_domain_init(domain);

	if (ret != 0) {
		LOG_ERR("architecture-specific initialization failed for domain %p with %d",
			domain, ret);
		/* The arch-specific error code is collapsed to -ENOMEM
		 * for the caller
		 */
		ret = -ENOMEM;
		goto unlock_out;
	}
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
	if (num_parts != 0U) {
		uint32_t i;

		for (i = 0U; i < num_parts; i++) {
			/* Validate each partition against those already
			 * copied in; failure aborts the whole init
			 */
			CHECKIF(!check_add_partition(domain, parts[i])) {
				LOG_ERR("invalid partition index %d (%p)",
					i, parts[i]);
				ret = -EINVAL;
				goto unlock_out;
			}

			domain->partitions[i] = *parts[i];
			domain->num_partitions++;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
			/* Arch hook failure does not abort the loop; ret
			 * ends up holding the last nonzero arch error
			 */
			int ret2 = arch_mem_domain_partition_add(domain, i);

			ARG_UNUSED(ret2);
			CHECKIF(ret2 != 0) {
				ret = ret2;
			}
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
		}
	}

unlock_out:
	k_spin_unlock(&z_mem_domain_lock, key);

out:
	return ret;
}
161 
k_mem_domain_add_partition(struct k_mem_domain * domain,struct k_mem_partition * part)162 int k_mem_domain_add_partition(struct k_mem_domain *domain,
163 			       struct k_mem_partition *part)
164 {
165 	int p_idx;
166 	k_spinlock_key_t key;
167 	int ret = 0;
168 
169 	CHECKIF(domain == NULL) {
170 		ret = -EINVAL;
171 		goto out;
172 	}
173 
174 	CHECKIF(!check_add_partition(domain, part)) {
175 		LOG_ERR("invalid partition %p", part);
176 		ret = -EINVAL;
177 		goto out;
178 	}
179 
180 	key = k_spin_lock(&z_mem_domain_lock);
181 
182 	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
183 		/* A zero-sized partition denotes it's a free partition */
184 		if (domain->partitions[p_idx].size == 0U) {
185 			break;
186 		}
187 	}
188 
189 	CHECKIF(!(p_idx < max_partitions)) {
190 		LOG_ERR("no free partition slots available");
191 		ret = -ENOSPC;
192 		goto unlock_out;
193 	}
194 
195 	LOG_DBG("add partition base %lx size %zu to domain %p",
196 		part->start, part->size, domain);
197 
198 	domain->partitions[p_idx].start = part->start;
199 	domain->partitions[p_idx].size = part->size;
200 	domain->partitions[p_idx].attr = part->attr;
201 
202 	domain->num_partitions++;
203 
204 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
205 	ret = arch_mem_domain_partition_add(domain, p_idx);
206 #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
207 
208 unlock_out:
209 	k_spin_unlock(&z_mem_domain_lock, key);
210 
211 out:
212 	return ret;
213 }
214 
k_mem_domain_remove_partition(struct k_mem_domain * domain,struct k_mem_partition * part)215 int k_mem_domain_remove_partition(struct k_mem_domain *domain,
216 				  struct k_mem_partition *part)
217 {
218 	int p_idx;
219 	k_spinlock_key_t key;
220 	int ret = 0;
221 
222 	CHECKIF((domain == NULL) || (part == NULL)) {
223 		ret = -EINVAL;
224 		goto out;
225 	}
226 
227 	key = k_spin_lock(&z_mem_domain_lock);
228 
229 	/* find a partition that matches the given start and size */
230 	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
231 		if ((domain->partitions[p_idx].start == part->start) &&
232 		    (domain->partitions[p_idx].size == part->size)) {
233 			break;
234 		}
235 	}
236 
237 	CHECKIF(!(p_idx < max_partitions)) {
238 		LOG_ERR("no matching partition found");
239 		ret = -ENOENT;
240 		goto unlock_out;
241 	}
242 
243 	LOG_DBG("remove partition base %lx size %zu from domain %p",
244 		part->start, part->size, domain);
245 
246 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
247 	ret = arch_mem_domain_partition_remove(domain, p_idx);
248 #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
249 
250 	/* A zero-sized partition denotes it's a free partition */
251 	domain->partitions[p_idx].size = 0U;
252 
253 	domain->num_partitions--;
254 
255 unlock_out:
256 	k_spin_unlock(&z_mem_domain_lock, key);
257 
258 out:
259 	return ret;
260 }
261 
/* Attach a thread to a memory domain. Caller must hold z_mem_domain_lock.
 *
 * Links the thread into the domain's thread list (if configured), records
 * the domain in the thread's mem_domain_info, then invokes the arch hook
 * (if configured). Returns 0, or the arch hook's error code.
 */
static int add_thread_locked(struct k_mem_domain *domain,
			     k_tid_t thread)
{
	int ret = 0;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT_NO_MSG(thread != NULL);

	LOG_DBG("add thread %p to domain %p", thread, domain);

#ifdef CONFIG_MEM_DOMAIN_HAS_THREAD_LIST
	sys_dlist_append(&domain->thread_mem_domain_list,
			 &thread->mem_domain_info.thread_mem_domain_node);
#endif /* CONFIG_MEM_DOMAIN_HAS_THREAD_LIST */

	thread->mem_domain_info.mem_domain = domain;

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	ret = arch_mem_domain_thread_add(thread);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

	return ret;
}
285 
/* Detach a thread from its current memory domain. Caller must hold
 * z_mem_domain_lock.
 *
 * Unlinks the thread from the domain's thread list (if configured) and
 * invokes the arch hook (if configured). Note the thread's
 * mem_domain_info.mem_domain pointer is not cleared here; the caller is
 * expected to reassign it (see add_thread_locked()).
 * Returns 0, or the arch hook's error code.
 */
static int remove_thread_locked(struct k_thread *thread)
{
	int ret = 0;

	__ASSERT_NO_MSG(thread != NULL);
	LOG_DBG("remove thread %p from memory domain %p",
		thread, thread->mem_domain_info.mem_domain);

#ifdef CONFIG_MEM_DOMAIN_HAS_THREAD_LIST
	sys_dlist_remove(&thread->mem_domain_info.thread_mem_domain_node);
#endif /* CONFIG_MEM_DOMAIN_HAS_THREAD_LIST */

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	ret = arch_mem_domain_thread_remove(thread);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

	return ret;
}
304 
305 /* Called from thread object initialization */
/* Called from thread object initialization */
void z_mem_domain_init_thread(struct k_thread *thread)
{
	int ret;
	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	/* New threads inherit memory domain configuration from parent */
	ret = add_thread_locked(_current->mem_domain_info.mem_domain, thread);
	/* Failure here is not recoverable during thread init; only caught
	 * in assert-enabled builds
	 */
	__ASSERT_NO_MSG(ret == 0);
	ARG_UNUSED(ret);

	k_spin_unlock(&z_mem_domain_lock, key);
}
318 
319 /* Called when thread aborts during teardown tasks. _sched_spinlock is held */
/* Called when thread aborts during teardown tasks. _sched_spinlock is held */
void z_mem_domain_exit_thread(struct k_thread *thread)
{
	int ret;

	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	/* Detach the dying thread from its domain; failure is only caught
	 * in assert-enabled builds
	 */
	ret = remove_thread_locked(thread);
	__ASSERT_NO_MSG(ret == 0);
	ARG_UNUSED(ret);

	k_spin_unlock(&z_mem_domain_lock, key);
}
332 
/**
 * Reassign a thread to a memory domain.
 *
 * Detaches the thread from its current domain and attaches it to
 * @p domain under a single hold of z_mem_domain_lock. A request to move
 * a thread into the domain it already belongs to is a successful no-op.
 *
 * @param domain Destination domain
 * @param thread Thread to reassign
 *
 * @return 0 on success, otherwise the error from the remove/add step
 */
int k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
{
	int ret = 0;
	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

	if (thread->mem_domain_info.mem_domain != domain) {
		ret = remove_thread_locked(thread);

		/* Only attach to the new domain if detach succeeded */
		if (ret == 0) {
			ret = add_thread_locked(domain, thread);
		}
	}

	k_spin_unlock(&z_mem_domain_lock, key);

	return ret;
}
350 
/* PRE_KERNEL_1 init hook: cache the arch partition limit and set up the
 * default memory domain (including the libc partition, when one exists).
 * Always returns 0; setup failures are only caught in assert-enabled
 * builds.
 */
static int init_mem_domain_module(void)
{
	int ret;

	/* ret is only consumed by __ASSERT(); silence unused-variable
	 * warnings when asserts are compiled out
	 */
	ARG_UNUSED(ret);

	max_partitions = arch_mem_domain_max_partitions_get();
	/*
	 * max_partitions must be less than or equal to
	 * CONFIG_MAX_DOMAIN_PARTITIONS, or would encounter array index
	 * out of bounds error.
	 */
	__ASSERT(max_partitions <= CONFIG_MAX_DOMAIN_PARTITIONS, "");

	ret = k_mem_domain_init(&k_mem_domain_default, 0, NULL);
	__ASSERT(ret == 0, "failed to init default mem domain");

#ifdef Z_LIBC_PARTITION_EXISTS
	ret = k_mem_domain_add_partition(&k_mem_domain_default,
					 &z_libc_partition);
	__ASSERT(ret == 0, "failed to add default libc mem partition");
#endif /* Z_LIBC_PARTITION_EXISTS */

	return 0;
}
376 
/* Register the module init hook to run during PRE_KERNEL_1 */
SYS_INIT(init_mem_domain_module, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
379