/*
 * Copyright (c) 2024, Tenstorrent AI ULC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#undef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200809L

#include <string.h>
#include <stdio.h>

#include <kernel_arch_interface.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/posix/fcntl.h>
#include <zephyr/posix/sys/mman.h>
#include <zephyr/posix/unistd.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/hash_function.h>

#define _page_size COND_CODE_1(CONFIG_MMU, (CONFIG_MMU_PAGE_SIZE), (PAGE_SIZE))

static const struct fd_op_vtable shm_vtable;

static sys_dlist_t shm_list = SYS_DLIST_STATIC_INIT(&shm_list);

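/*
 * Bookkeeping for one named shared-memory object: its backing memory, its
 * node in the global object list, the number of open descriptors referring
 * to it, its size, the hash of its name, and unlink/map state flags.
 */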
struct shm_obj {
	uint8_t *mem;
	sys_dnode_t node;
	size_t refs;
	size_t size;
	uint32_t hash;
	bool unlinked: 1;
	bool mapped: 1;
};

static inline uint32_t hash32(const char *str, size_t n)
{
	/* we need a hasher that is not sensitive to input alignment */
	return sys_hash32_djb2(str, n);
}

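/* A valid shm object name is non-NULL, begins with '/', and has at least one character after it */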
static bool shm_obj_name_valid(const char *name, size_t len)
{
	if (name == NULL) {
		return false;
	}

	if (name[0] != '/') {
		return false;
	}

	if (len < 2) {
		return false;
	}

	return true;
}

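/* Look up an object in the global list by the hash of its name */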
static struct shm_obj *shm_obj_find(uint32_t key)
{
	struct shm_obj *shm;

	SYS_DLIST_FOR_EACH_CONTAINER(&shm_list, shm, node) {
		if (shm->hash == key) {
			return shm;
		}
	}

	return NULL;
}

static void shm_obj_add(struct shm_obj *shm)
{
	sys_dlist_init(&shm->node);
	sys_dlist_append(&shm_list, &shm->node);
}

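/*
 * Remove an object from the global list, release its backing memory (unmap it
 * when the MMU is in use, otherwise return it to the heap), and free the object.
 */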
static void shm_obj_remove(struct shm_obj *shm)
{
	sys_dlist_remove(&shm->node);
	if (shm->size > 0) {
		if (IS_ENABLED(CONFIG_MMU)) {
			uintptr_t phys = 0;

			if (arch_page_phys_get(shm->mem, &phys) == 0) {
				k_mem_unmap(shm->mem, ROUND_UP(shm->size, _page_size));
			}
		} else {
			k_free(shm->mem);
		}
	}
	k_free(shm);
}

static int shm_fstat(struct shm_obj *shm, struct stat *st)
{
	*st = (struct stat){0};
	st->st_mode = ZVFS_MODE_IFSHM;
	st->st_size = shm->size;

	return 0;
}

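/*
 * Allocate the object's backing memory: whole pages via k_mem_map() when the
 * MMU is in use, otherwise a zero-filled heap allocation. The object may only
 * be sized once.
 */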
static int shm_ftruncate(struct shm_obj *shm, off_t length)
{
	void *virt;

	if (length < 0) {
		errno = EINVAL;
		return -1;
	}

	if (length == 0) {
		if (shm->size != 0) {
			/* only allow sizing the object once, for consistency */
			errno = EBUSY;
			return -1;
		}

		return 0;
	}

	if (shm->size != 0) {
		/* the object is already sized; do not replace (and leak) the existing memory */
		errno = EBUSY;
		return -1;
	}

	if (IS_ENABLED(CONFIG_MMU)) {
		virt = k_mem_map(ROUND_UP(length, _page_size), K_MEM_PERM_RW);
	} else {
		virt = k_calloc(1, length);
	}

	if (virt == NULL) {
		errno = ENOMEM;
		return -1;
	}

	shm->mem = virt;
	shm->size = length;

	return 0;
}

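/* Compute the new file offset relative to the start, the current offset, or the object size */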
static off_t shm_lseek(struct shm_obj *shm, off_t offset, int whence, size_t cur)
{
	size_t addend;

	switch (whence) {
	case SEEK_SET:
		addend = 0;
		break;
	case SEEK_CUR:
		addend = cur;
		break;
	case SEEK_END:
		addend = shm->size;
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	if ((INTPTR_MAX - addend) < offset) {
		errno = EOVERFLOW;
		return -1;
	}

	offset += addend;
	if (offset < 0) {
		errno = EINVAL;
		return -1;
	}

	return offset;
}

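/*
 * Resolve a mapping request to an address within the object's backing memory.
 * Only supported with an MMU; MAP_FIXED is rejected, the offset must be
 * page-aligned, and the requested range must lie within the object.
 */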
static int shm_mmap(struct shm_obj *shm, void *addr, size_t len, int prot, int flags, off_t off,
		    void **virt)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(prot);
	__ASSERT_NO_MSG(virt != NULL);

	if ((len == 0) || (off < 0) || ((flags & MAP_FIXED) != 0) ||
	    ((off & (_page_size - 1)) != 0) || ((len + off) > shm->size)) {
		errno = EINVAL;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_MMU)) {
		errno = ENOTSUP;
		return -1;
	}

	if (shm->mem == NULL) {
		errno = ENOMEM;
		return -1;
	}

	/*
	 * Note: due to Zephyr's page mapping algorithm, physical pages can only have 1
	 * mapping, so different file handles will have the same virtual memory address
	 * underneath.
	 */
	*virt = shm->mem + off;

	return 0;
}

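/* Common read/write helper: copy at most the bytes remaining beyond the given offset */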
static ssize_t shm_rw(struct shm_obj *shm, void *buf, size_t size, bool is_write, size_t offset)
{
	if (offset >= shm->size) {
		size = 0;
	} else {
		size = MIN(size, shm->size - offset);
	}

	if (size > 0) {
		if (is_write) {
			memcpy(&shm->mem[offset], buf, size);
		} else {
			memcpy(buf, &shm->mem[offset], size);
		}
	}

	return size;
}

static ssize_t shm_read(void *obj, void *buf, size_t sz, size_t offset)
{
	return shm_rw((struct shm_obj *)obj, buf, sz, false, offset);
}

static ssize_t shm_write(void *obj, const void *buf, size_t sz, size_t offset)
{
	return shm_rw((struct shm_obj *)obj, (void *)buf, sz, true, offset);
}

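/* Drop one reference; the object is destroyed once it is both unlinked and unreferenced */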
static int shm_close(void *obj)
{
	struct shm_obj *shm = obj;

	shm->refs -= (shm->refs > 0) ? 1 : 0;
	if (shm->unlinked && (shm->refs == 0)) {
		shm_obj_remove(shm);
	}

	return 0;
}

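/* Dispatch fdtable ioctls (lseek, mmap, lock, stat, truncate) to the helpers above */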
static int shm_ioctl(void *obj, unsigned int request, va_list args)
{
	struct shm_obj *shm = obj;

	switch (request) {
	case ZFD_IOCTL_LSEEK: {
		off_t offset = va_arg(args, off_t);
		int whence = va_arg(args, int);
		size_t cur = va_arg(args, size_t);

		return shm_lseek(shm, offset, whence, cur);
	} break;
	case ZFD_IOCTL_MMAP: {
		void *addr = va_arg(args, void *);
		size_t len = va_arg(args, size_t);
		int prot = va_arg(args, int);
		int flags = va_arg(args, int);
		off_t off = va_arg(args, off_t);
		void **maddr = va_arg(args, void **);

		return shm_mmap(shm, addr, len, prot, flags, off, maddr);
	} break;
	case ZFD_IOCTL_SET_LOCK:
		break;
	case ZFD_IOCTL_STAT: {
		struct stat *st = va_arg(args, struct stat *);

		return shm_fstat(shm, st);
	} break;
	case ZFD_IOCTL_TRUNCATE: {
		off_t length = va_arg(args, off_t);

		return shm_ftruncate(shm, length);
	} break;
	default:
		errno = ENOTSUP;
		return -1;
	}

	return 0;
}

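/* File descriptor operations registered for shm objects in shm_open() */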
static const struct fd_op_vtable shm_vtable = {
	.read_offs = shm_read,
	.write_offs = shm_write,
	.close = shm_close,
	.ioctl = shm_ioctl,
};

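/*
 * Typical usage, as a minimal sketch (illustrative name, error handling omitted):
 *
 *   int fd = shm_open("/example", O_CREAT | O_RDWR, 0600);
 *
 *   ftruncate(fd, 4096);
 *   char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   ...
 *   munmap(p, 4096);
 *   close(fd);
 *   shm_unlink("/example");
 */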
int shm_open(const char *name, int oflag, mode_t mode)
{
	int fd;
	uint32_t key;
	struct shm_obj *shm;
	bool rd = (oflag & O_RDONLY) != 0;
	bool rw = (oflag & O_RDWR) != 0;
	bool creat = (oflag & O_CREAT) != 0;
	bool excl = (oflag & O_EXCL) != 0;
	bool trunc = false; /* (oflag & O_TRUNC) != 0 */
	size_t name_len = (name == NULL) ? 0 : strnlen(name, PATH_MAX);

	/* revisit when file-based permissions are available */
	if ((mode & 0777) == 0) {
		errno = EINVAL;
		return -1;
	}

	if (!(rd ^ rw)) {
		errno = EINVAL;
		return -1;
	}

	if (rd && trunc) {
		errno = EINVAL;
		return -1;
	}

	if (!shm_obj_name_valid(name, name_len)) {
		errno = EINVAL;
		return -1;
	}

	fd = zvfs_reserve_fd();
	if (fd < 0) {
		errno = EMFILE;
		return -1;
	}

	key = hash32(name, name_len);
	shm = shm_obj_find(key);
	if ((shm != NULL) && shm->unlinked) {
		/* we cannot open a shm object that has already been unlinked */
		zvfs_free_fd(fd);
		errno = EACCES;
		return -1;
	}

	if (creat) {
		if ((shm != NULL) && excl) {
			zvfs_free_fd(fd);
			errno = EEXIST;
			return -1;
		}

		if (shm == NULL) {
			shm = k_calloc(1, sizeof(*shm));
			if (shm == NULL) {
				zvfs_free_fd(fd);
				errno = ENOSPC;
				return -1;
			}

			shm->hash = key;
			shm_obj_add(shm);
		}
	} else if (shm == NULL) {
		zvfs_free_fd(fd);
		errno = ENOENT;
		return -1;
	}

	++shm->refs;
	zvfs_finalize_typed_fd(fd, shm, &shm_vtable, ZVFS_MODE_IFSHM);

	return fd;
}

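/*
 * Mark the named object as unlinked so it can no longer be opened; destroy it
 * immediately if no descriptors reference it, otherwise when the last one closes.
 */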
int shm_unlink(const char *name)
{
	uint32_t key;
	struct shm_obj *shm;
	size_t name_len = (name == NULL) ? 0 : strnlen(name, PATH_MAX);

	if (!shm_obj_name_valid(name, name_len)) {
		errno = EINVAL;
		return -1;
	}

	key = hash32(name, name_len);
	shm = shm_obj_find(key);
	if ((shm == NULL) || shm->unlinked) {
		errno = ENOENT;
		return -1;
	}

	shm->unlinked = true;
	if (shm->refs == 0) {
		shm_obj_remove(shm);
	}

	return 0;
}