/*
 * Copyright (c) 2024, Tenstorrent AI ULC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include <stdio.h>

#include <kernel_arch_interface.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/posix/fcntl.h>
#include <zephyr/posix/sys/mman.h>
#include <zephyr/posix/unistd.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/hash_function.h>

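/*
 * Effective page size used for rounding and alignment checks: the MMU page size
 * when CONFIG_MMU is enabled, otherwise the architecture's PAGE_SIZE.
 */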
#define _page_size COND_CODE_1(CONFIG_MMU, (CONFIG_MMU_PAGE_SIZE), (PAGE_SIZE))

static const struct fd_op_vtable shm_vtable;

static sys_dlist_t shm_list = SYS_DLIST_STATIC_INIT(&shm_list);

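/*
 * Bookkeeping for one POSIX shared-memory object. Objects live on the global
 * shm_list and are identified by the 32-bit hash of their name. Backing memory
 * is released once the object has been unlinked and its last reference is closed.
 */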
struct shm_obj {
	uint8_t *mem;
	sys_dnode_t node;
	size_t refs;
	size_t size;
	uint32_t hash;
	bool unlinked: 1;
	bool mapped: 1;
};

static inline uint32_t hash32(const char *str, size_t n)
{
	/* we need a hasher that is not sensitive to input alignment */
	return sys_hash32_djb2(str, n);
}

static bool shm_obj_name_valid(const char *name, size_t len)
{
	if (name == NULL) {
		return false;
	}

	if (name[0] != '/') {
		return false;
	}

	if (len < 2) {
		return false;
	}

	return true;
}

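/*
 * Look up a shared-memory object by the hash of its name. Objects are matched on
 * the 32-bit hash alone, so distinct names that happen to collide are treated as
 * the same object.
 */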
static struct shm_obj *shm_obj_find(uint32_t key)
{
	struct shm_obj *shm;

	SYS_DLIST_FOR_EACH_CONTAINER(&shm_list, shm, node) {
		if (shm->hash == key) {
			return shm;
		}
	}

	return NULL;
}

static void shm_obj_add(struct shm_obj *shm)
{
	sys_dlist_init(&shm->node);
	sys_dlist_append(&shm_list, &shm->node);
}

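/*
 * Remove an object from the global list and release its backing memory: unmap the
 * pages when an MMU is in use (only if they were actually mapped), otherwise free
 * the heap allocation, then free the object itself.
 */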
static void shm_obj_remove(struct shm_obj *shm)
{
	sys_dlist_remove(&shm->node);
	if (shm->size > 0) {
		if (IS_ENABLED(CONFIG_MMU)) {
			uintptr_t phys = 0;

			if (arch_page_phys_get(shm->mem, &phys) == 0) {
				k_mem_unmap(shm->mem, ROUND_UP(shm->size, _page_size));
			}
		} else {
			k_free(shm->mem);
		}
	}
	k_free(shm);
}

static int shm_fstat(struct shm_obj *shm, struct stat *st)
{
	*st = (struct stat){0};
	st->st_mode = ZVFS_MODE_IFSHM;
	st->st_size = shm->size;

	return 0;
}

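/*
 * Size the object's backing memory. The allocation happens once: with an MMU the
 * requested length is rounded up to a whole number of pages and mapped read/write,
 * otherwise it is zero-allocated from the kernel heap.
 */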
static int shm_ftruncate(struct shm_obj *shm, off_t length)
{
	void *virt;

	if (length < 0) {
		errno = EINVAL;
		return -1;
	}

	if (shm->size != 0) {
		/* only allow sizing the object once, for consistency */
		errno = EBUSY;
		return -1;
	}

	if (length == 0) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_MMU)) {
		virt = k_mem_map(ROUND_UP(length, _page_size), K_MEM_PERM_RW);
	} else {
		virt = k_calloc(1, length);
	}

	if (virt == NULL) {
		errno = ENOMEM;
		return -1;
	}

	shm->mem = virt;
	shm->size = length;

	return 0;
}

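/*
 * Compute the new file offset for SEEK_SET, SEEK_CUR, or SEEK_END, rejecting values
 * that would overflow or end up negative; the resulting offset is returned for the
 * caller to record.
 */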
static off_t shm_lseek(struct shm_obj *shm, off_t offset, int whence, size_t cur)
{
	size_t addend;

	switch (whence) {
	case SEEK_SET:
		addend = 0;
		break;
	case SEEK_CUR:
		addend = cur;
		break;
	case SEEK_END:
		addend = shm->size;
		break;
	default:
		errno = EINVAL;
		return -1;
	}

	if ((INTPTR_MAX - addend) < offset) {
		errno = EOVERFLOW;
		return -1;
	}

	offset += addend;
	if (offset < 0) {
		errno = EINVAL;
		return -1;
	}

	return offset;
}

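/*
 * Validate an mmap() request against the object: the length must be non-zero, the
 * offset non-negative and page-aligned, MAP_FIXED is not supported, and the mapping
 * must fit within the object. Only MMU-based configurations can satisfy the request.
 */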
static int shm_mmap(struct shm_obj *shm, void *addr, size_t len, int prot, int flags, off_t off,
		    void **virt)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(prot);
	__ASSERT_NO_MSG(virt != NULL);

	if ((len == 0) || (off < 0) || ((flags & MAP_FIXED) != 0) ||
	    ((off & (_page_size - 1)) != 0) || ((len + off) > shm->size)) {
		errno = EINVAL;
		return -1;
	}

	if (!IS_ENABLED(CONFIG_MMU)) {
		errno = ENOTSUP;
		return -1;
	}

	if (shm->mem == NULL) {
		errno = ENOMEM;
		return -1;
	}

	/*
	 * Note: due to Zephyr's page mapping algorithm, physical pages can only have 1
	 * mapping, so different file handles will have the same virtual memory address
	 * underneath.
	 */
	*virt = shm->mem + off;

	return 0;
}

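/*
 * Common read/write helper. Accesses are clamped to the object's size, so a request
 * that starts at or beyond the end transfers zero bytes and one that crosses the end
 * is shortened.
 */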
static ssize_t shm_rw(struct shm_obj *shm, void *buf, size_t size, bool is_write, size_t offset)
{
	if (offset >= shm->size) {
		size = 0;
	} else {
		size = MIN(size, shm->size - offset);
	}

	if (size > 0) {
		if (is_write) {
			memcpy(&shm->mem[offset], buf, size);
		} else {
			memcpy(buf, &shm->mem[offset], size);
		}
	}

	return size;
}

static ssize_t shm_read(void *obj, void *buf, size_t sz, size_t offset)
{
	return shm_rw((struct shm_obj *)obj, buf, sz, false, offset);
}

static ssize_t shm_write(void *obj, const void *buf, size_t sz, size_t offset)
{
	return shm_rw((struct shm_obj *)obj, (void *)buf, sz, true, offset);
}

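/*
 * Drop one reference to the object. Destruction is deferred: the object is only
 * removed and its memory freed once it has been unlinked and the last descriptor
 * referring to it is closed.
 */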
static int shm_close(void *obj)
{
	struct shm_obj *shm = obj;

	shm->refs -= (shm->refs > 0) ? 1 : 0;
	if (shm->unlinked && (shm->refs == 0)) {
		shm_obj_remove(shm);
	}

	return 0;
}

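/*
 * Dispatch the zvfs ioctl requests that shared-memory objects support: lseek, mmap,
 * stat, truncate, and (as a no-op) file locking.
 */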
static int shm_ioctl(void *obj, unsigned int request, va_list args)
{
	struct shm_obj *shm = obj;

	switch (request) {
	case ZFD_IOCTL_LSEEK: {
		off_t offset = va_arg(args, off_t);
		int whence = va_arg(args, int);
		size_t cur = va_arg(args, size_t);

		return shm_lseek(shm, offset, whence, cur);
	} break;
	case ZFD_IOCTL_MMAP: {
		void *addr = va_arg(args, void *);
		size_t len = va_arg(args, size_t);
		int prot = va_arg(args, int);
		int flags = va_arg(args, int);
		off_t off = va_arg(args, off_t);
		void **maddr = va_arg(args, void **);

		return shm_mmap(shm, addr, len, prot, flags, off, maddr);
	} break;
	case ZFD_IOCTL_SET_LOCK:
		break;
	case ZFD_IOCTL_STAT: {
		struct stat *st = va_arg(args, struct stat *);

		return shm_fstat(shm, st);
	} break;
	case ZFD_IOCTL_TRUNCATE: {
		off_t length = va_arg(args, off_t);

		return shm_ftruncate(shm, length);
	} break;
	default:
		errno = ENOTSUP;
		return -1;
	}

	return 0;
}

static const struct fd_op_vtable shm_vtable = {
	.read_offs = shm_read,
	.write_offs = shm_write,
	.close = shm_close,
	.ioctl = shm_ioctl,
};

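/*
 * Open (and optionally create) a POSIX shared-memory object. Typical usage, for
 * illustration only:
 *
 *	int fd = shm_open("/foo", O_CREAT | O_RDWR, 0666);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Objects are keyed by the hash of their name and shared between callers until
 * shm_unlink() removes the name and the last descriptor is closed.
 */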
int shm_open(const char *name, int oflag, mode_t mode)
{
	int fd;
	uint32_t key;
	struct shm_obj *shm;
	bool rd = (oflag & O_RDONLY) != 0;
	bool rw = (oflag & O_RDWR) != 0;
	bool creat = (oflag & O_CREAT) != 0;
	bool excl = (oflag & O_EXCL) != 0;
	bool trunc = false; /* (oflag & O_TRUNC) != 0 */
	size_t name_len = (name == NULL) ? 0 : strnlen(name, PATH_MAX);

	/* revisit when file-based permissions are available */
	if ((mode & 0777) == 0) {
		errno = EINVAL;
		return -1;
	}

	if (!(rd ^ rw)) {
		errno = EINVAL;
		return -1;
	}

	if (rd && trunc) {
		errno = EINVAL;
		return -1;
	}

	if (!shm_obj_name_valid(name, name_len)) {
		errno = EINVAL;
		return -1;
	}

	fd = zvfs_reserve_fd();
	if (fd < 0) {
		errno = EMFILE;
		return -1;
	}

	key = hash32(name, name_len);
	shm = shm_obj_find(key);
	if ((shm != NULL) && shm->unlinked) {
		/* we cannot open a shm object that has already been unlinked */
		zvfs_free_fd(fd);
		errno = EACCES;
		return -1;
	}

	if (creat) {
		if ((shm != NULL) && excl) {
			zvfs_free_fd(fd);
			errno = EEXIST;
			return -1;
		}

		if (shm == NULL) {
			shm = k_calloc(1, sizeof(*shm));
			if (shm == NULL) {
				zvfs_free_fd(fd);
				errno = ENOSPC;
				return -1;
			}

			shm->hash = key;
			shm_obj_add(shm);
		}
	} else if (shm == NULL) {
		zvfs_free_fd(fd);
		errno = ENOENT;
		return -1;
	}

	++shm->refs;
	zvfs_finalize_typed_fd(fd, shm, &shm_vtable, ZVFS_MODE_IFSHM);

	return fd;
}

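/*
 * Remove the name of a shared-memory object. The object itself is destroyed
 * immediately if no descriptors reference it, otherwise destruction is deferred
 * until the final close.
 */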
int shm_unlink(const char *name)
{
	uint32_t key;
	struct shm_obj *shm;
	size_t name_len = (name == NULL) ? 0 : strnlen(name, PATH_MAX);

	if (!shm_obj_name_valid(name, name_len)) {
		errno = EINVAL;
		return -1;
	}

	key = hash32(name, name_len);
	shm = shm_obj_find(key);
	if ((shm == NULL) || shm->unlinked) {
		errno = ENOENT;
		return -1;
	}

	shm->unlinked = true;
	if (shm->refs == 0) {
		shm_obj_remove(shm);
	}

	return 0;
}