/*
 * Copyright (c) 2017 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */
7 #include <init.h>
8 #include <kernel.h>
9 #include <kernel_structs.h>
10 #include <kernel_internal.h>
11 #include <sys/__assert.h>
12 #include <stdbool.h>
13 #include <spinlock.h>
14 #include <sys/libc-hooks.h>
15 #include <logging/log.h>
16 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
17 
/* Serializes all mutations to memory domains and thread membership */
struct k_spinlock z_mem_domain_lock;
/* Per-domain partition limit reported by the arch at boot
 * (set in init_mem_domain_module())
 */
static uint8_t max_partitions;

/* Default memory domain; initialized at boot by init_mem_domain_module() */
struct k_mem_domain k_mem_domain_default;
22 
#if __ASSERT_ON
/* Validate that a partition may be added to a domain: non-NULL, nonzero
 * size, no address wraparound, no W^X violation (when enforced), and no
 * overlap with any partition already installed in the domain.
 */
static bool check_add_partition(struct k_mem_domain *domain,
				struct k_mem_partition *part)
{
	uintptr_t new_start, new_end;

	if (part == NULL) {
		LOG_ERR("NULL k_mem_partition provided");
		return false;
	}

#ifdef CONFIG_EXECUTE_XOR_WRITE
	/* Arches where execution cannot be disabled should always return
	 * false to this check
	 */
	if (K_MEM_PARTITION_IS_WRITABLE(part->attr) &&
	    K_MEM_PARTITION_IS_EXECUTABLE(part->attr)) {
		LOG_ERR("partition is writable and executable <start %lx>",
			part->start);
		return false;
	}
#endif

	if (part->size == 0U) {
		LOG_ERR("zero sized partition at %p with base 0x%lx",
			part, part->start);
		return false;
	}

	new_start = part->start;
	new_end = new_start + part->size;

	if (new_end <= new_start) {
		LOG_ERR("invalid partition %p, wraparound detected. base 0x%lx size %zu",
			part, part->start, part->size);
		return false;
	}

	/* Reject overlap with every existing partition in the domain;
	 * zero-sized slots are unused and skipped.
	 */
	for (int i = 0; i < domain->num_partitions; i++) {
		struct k_mem_partition *cur = &domain->partitions[i];
		uintptr_t cur_start, cur_end;

		if (cur->size == 0U) {
			continue;
		}

		cur_start = cur->start;
		cur_end = cur_start + cur->size;

		if (new_end > cur_start && cur_end > new_start) {
			LOG_ERR("partition %p base %lx (size %zu) overlaps existing base %lx (size %zu)",
				part, part->start, part->size,
				cur->start, cur->size);
			return false;
		}
	}

	return true;
}
#endif
88 
/**
 * Initialize a memory domain with an optional initial partition set.
 *
 * Resets the domain's partition table and member-thread queue under the
 * domain lock, runs the architecture-specific initialization hook when
 * CONFIG_ARCH_MEM_DOMAIN_DATA is enabled (panicking on failure), then
 * copies in each supplied partition.
 *
 * @param domain Memory domain to initialize, must not be NULL
 * @param num_parts Number of entries in @p parts; must not exceed
 *                  max_partitions
 * @param parts Array of partition pointers; may be NULL only when
 *              @p num_parts is 0
 */
void k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
		       struct k_mem_partition *parts[])
{
	k_spinlock_key_t key;

	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT(num_parts == 0U || parts != NULL,
		 "parts array is NULL and num_parts is nonzero");
	__ASSERT(num_parts <= max_partitions,
		 "num_parts of %d exceeds maximum allowable partitions (%d)",
		 num_parts, max_partitions);

	key = k_spin_lock(&z_mem_domain_lock);

	/* Start from a clean slate: no partitions (size 0 marks a free
	 * slot), no member threads.
	 */
	domain->num_partitions = 0U;
	(void)memset(domain->partitions, 0, sizeof(domain->partitions));
	sys_dlist_init(&domain->mem_domain_q);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
	int ret = arch_mem_domain_init(domain);

	/* TODO propagate return values, see #24609.
	 *
	 * Not using an assertion here as this is a memory allocation error
	 */
	if (ret != 0) {
		LOG_ERR("architecture-specific initialization failed for domain %p with %d",
			domain, ret);
		k_panic();
	}
#endif
	if (num_parts != 0U) {
		uint32_t i;

		for (i = 0U; i < num_parts; i++) {
			/* check_add_partition() is only compiled under
			 * __ASSERT_ON, hence the assertion wrapper
			 */
			__ASSERT(check_add_partition(domain, parts[i]),
				 "invalid partition index %d (%p)",
				 i, parts[i]);

			domain->partitions[i] = *parts[i];
			domain->num_partitions++;
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
			/* Push each partition to hardware immediately */
			arch_mem_domain_partition_add(domain, i);
#endif
		}
	}

	k_spin_unlock(&z_mem_domain_lock, key);
}
138 
k_mem_domain_add_partition(struct k_mem_domain * domain,struct k_mem_partition * part)139 void k_mem_domain_add_partition(struct k_mem_domain *domain,
140 				struct k_mem_partition *part)
141 {
142 	int p_idx;
143 	k_spinlock_key_t key;
144 
145 	__ASSERT_NO_MSG(domain != NULL);
146 	__ASSERT(check_add_partition(domain, part),
147 		 "invalid partition %p", part);
148 
149 	key = k_spin_lock(&z_mem_domain_lock);
150 
151 	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
152 		/* A zero-sized partition denotes it's a free partition */
153 		if (domain->partitions[p_idx].size == 0U) {
154 			break;
155 		}
156 	}
157 
158 	__ASSERT(p_idx < max_partitions,
159 		 "no free partition slots available");
160 
161 	LOG_DBG("add partition base %lx size %zu to domain %p\n",
162 		part->start, part->size, domain);
163 
164 	domain->partitions[p_idx].start = part->start;
165 	domain->partitions[p_idx].size = part->size;
166 	domain->partitions[p_idx].attr = part->attr;
167 
168 	domain->num_partitions++;
169 
170 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
171 	arch_mem_domain_partition_add(domain, p_idx);
172 #endif
173 	k_spin_unlock(&z_mem_domain_lock, key);
174 }
175 
k_mem_domain_remove_partition(struct k_mem_domain * domain,struct k_mem_partition * part)176 void k_mem_domain_remove_partition(struct k_mem_domain *domain,
177 				  struct k_mem_partition *part)
178 {
179 	int p_idx;
180 	k_spinlock_key_t key;
181 
182 	__ASSERT_NO_MSG(domain != NULL);
183 	__ASSERT_NO_MSG(part != NULL);
184 
185 	key = k_spin_lock(&z_mem_domain_lock);
186 
187 	/* find a partition that matches the given start and size */
188 	for (p_idx = 0; p_idx < max_partitions; p_idx++) {
189 		if (domain->partitions[p_idx].start == part->start &&
190 		    domain->partitions[p_idx].size == part->size) {
191 			break;
192 		}
193 	}
194 
195 	__ASSERT(p_idx < max_partitions, "no matching partition found");
196 
197 	LOG_DBG("remove partition base %lx size %zu from domain %p\n",
198 		part->start, part->size, domain);
199 
200 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
201 	arch_mem_domain_partition_remove(domain, p_idx);
202 #endif
203 
204 	/* A zero-sized partition denotes it's a free partition */
205 	domain->partitions[p_idx].size = 0U;
206 
207 	domain->num_partitions--;
208 
209 	k_spin_unlock(&z_mem_domain_lock, key);
210 }
211 
/* Append @p thread to @p domain's member queue and point the thread's
 * domain info at it; notifies the arch layer when the synchronous API is
 * enabled. Caller must hold z_mem_domain_lock.
 */
static void add_thread_locked(struct k_mem_domain *domain,
			      k_tid_t thread)
{
	__ASSERT_NO_MSG(domain != NULL);
	__ASSERT_NO_MSG(thread != NULL);

	/* No trailing '\n': the logging subsystem terminates messages */
	LOG_DBG("add thread %p to domain %p", thread, domain);
	sys_dlist_append(&domain->mem_domain_q,
			 &thread->mem_domain_info.mem_domain_q_node);
	thread->mem_domain_info.mem_domain = domain;

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
	arch_mem_domain_thread_add(thread);
#endif
}
227 
remove_thread_locked(struct k_thread * thread)228 static void remove_thread_locked(struct k_thread *thread)
229 {
230 	__ASSERT_NO_MSG(thread != NULL);
231 	LOG_DBG("remove thread %p from memory domain %p\n",
232 		thread, thread->mem_domain_info.mem_domain);
233 	sys_dlist_remove(&thread->mem_domain_info.mem_domain_q_node);
234 
235 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
236 	arch_mem_domain_thread_remove(thread);
237 #endif
238 }
239 
240 /* Called from thread object initialization */
z_mem_domain_init_thread(struct k_thread * thread)241 void z_mem_domain_init_thread(struct k_thread *thread)
242 {
243 	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
244 
245 	/* New threads inherit memory domain configuration from parent */
246 	add_thread_locked(_current->mem_domain_info.mem_domain, thread);
247 	k_spin_unlock(&z_mem_domain_lock, key);
248 }
249 
250 /* Called when thread aborts during teardown tasks. sched_spinlock is held */
z_mem_domain_exit_thread(struct k_thread * thread)251 void z_mem_domain_exit_thread(struct k_thread *thread)
252 {
253 	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
254 	remove_thread_locked(thread);
255 	k_spin_unlock(&z_mem_domain_lock, key);
256 }
257 
/* Move @p thread into @p domain; a no-op when the thread is already a
 * member of that domain.
 */
void k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
{
	k_spinlock_key_t lock_key = k_spin_lock(&z_mem_domain_lock);
	bool already_member =
		(thread->mem_domain_info.mem_domain == domain);

	if (!already_member) {
		remove_thread_locked(thread);
		add_thread_locked(domain, thread);
	}
	k_spin_unlock(&z_mem_domain_lock, lock_key);
}
269 
/**
 * Boot-time initialization of the memory domain subsystem.
 *
 * Queries the architecture for the per-domain partition limit, then sets
 * up the default memory domain.
 *
 * @param arg Unused, required by the SYS_INIT handler signature
 * @return 0 always (failures panic or assert instead)
 */
static int init_mem_domain_module(const struct device *arg)
{
	ARG_UNUSED(arg);

	max_partitions = arch_mem_domain_max_partitions_get();
	/*
	 * max_partitions must be less than or equal to
	 * CONFIG_MAX_DOMAIN_PARTITIONS, or would encounter array index
	 * out of bounds error.
	 */
	__ASSERT(max_partitions <= CONFIG_MAX_DOMAIN_PARTITIONS, "");

	k_mem_domain_init(&k_mem_domain_default, 0, NULL);
#ifdef Z_LIBC_PARTITION_EXISTS
	/* Presumably grants default-domain threads access to the C
	 * library's memory region (see sys/libc-hooks.h) — NOTE(review):
	 * semantics of z_libc_partition defined outside this file
	 */
	k_mem_domain_add_partition(&k_mem_domain_default, &z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */

	return 0;
}

/* Run before other kernel init so the default domain exists early */
SYS_INIT(init_mem_domain_module, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
292