/*
 * Copyright (c) 2018 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief File descriptor table
 *
 * This file provides a generic file descriptor table implementation,
 * suitable for any I/O object implementing POSIX I/O semantics (i.e.
 * read/write plus auxiliary operations).
 */
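
/*
 * Illustrative usage sketch (the backend names my_obj and my_vtable are
 * hypothetical; the zvfs_* calls are the ones defined in this file):
 *
 *	int fd = zvfs_reserve_fd();	sets errno to ENFILE on failure
 *
 *	if (fd < 0) {
 *		return -1;
 *	}
 *
 *	initialize my_obj here; call zvfs_free_fd(fd) if that fails
 *
 *	zvfs_finalize_fd(fd, &my_obj, &my_vtable);
 *
 * After finalization the descriptor is usable through zvfs_read(),
 * zvfs_write(), zvfs_close(), etc.
 */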

#include <errno.h>
#include <string.h>

#include <zephyr/posix/fcntl.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/speculation.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/atomic.h>

struct stat;

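/* One table slot: the backing I/O object, its operation vtable, and
 * per-descriptor state (reference count, the lock/condvar handed out to
 * the object, the file offset for seekable objects, and ZVFS_MODE_* bits).
 */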
struct fd_entry {
	void *obj;
	const struct fd_op_vtable *vtable;
	atomic_t refcount;
	struct k_mutex lock;
	struct k_condvar cond;
	size_t offset;
	uint32_t mode;
};

#if defined(CONFIG_POSIX_DEVICE_IO)
static const struct fd_op_vtable stdinout_fd_op_vtable;

BUILD_ASSERT(CONFIG_ZVFS_OPEN_MAX >= 3, "CONFIG_ZVFS_OPEN_MAX >= 3 for CONFIG_POSIX_DEVICE_IO");
#endif /* defined(CONFIG_POSIX_DEVICE_IO) */

static struct fd_entry fdtable[CONFIG_ZVFS_OPEN_MAX] = {
#if defined(CONFIG_POSIX_DEVICE_IO)
	/*
	 * Predefine entries for stdin/stdout/stderr.
	 */
	{
		/* STDIN */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[0].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[0].cond),
	},
	{
		/* STDOUT */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[1].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[1].cond),
	},
	{
		/* STDERR */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[2].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[2].cond),
	},
#else
	{0},
#endif
};

static K_MUTEX_DEFINE(fdtable_lock);

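/* Take an additional reference on an entry; returns the new count. */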
static int z_fd_ref(int fd)
{
	return atomic_inc(&fdtable[fd].refcount) + 1;
}

static int z_fd_unref(int fd)
{
	atomic_val_t old_rc;

	/* The reference count must be checked before decrementing to avoid
	 * dropping it below zero, which would leak the file descriptor. The
	 * loop below performs an atomic decrement only while the refcount is
	 * greater than zero; otherwise the refcount is left unmodified.
	 */
	do {
		old_rc = atomic_get(&fdtable[fd].refcount);
		if (!old_rc) {
			return 0;
		}
	} while (!atomic_cas(&fdtable[fd].refcount, old_rc, old_rc - 1));

	if (old_rc != 1) {
		return old_rc - 1;
	}

	fdtable[fd].obj = NULL;
	fdtable[fd].vtable = NULL;

	return 0;
}

static int _find_fd_entry(void)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (!atomic_get(&fdtable[fd].refcount)) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

static int _check_fd(int fd)
{
	if ((fd < 0) || (fd >= ARRAY_SIZE(fdtable))) {
		errno = EBADF;
		return -1;
	}

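	/* Clamp the index against speculative out-of-bounds access (see
	 * sys/speculation.h) before it is used to address the table.
	 */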
	fd = k_array_index_sanitize(fd, ARRAY_SIZE(fdtable));

	if (!atomic_get(&fdtable[fd].refcount)) {
		errno = EBADF;
		return -1;
	}

	return 0;
}

#ifdef CONFIG_ZTEST
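/* Test-only helper: report whether the entry's lock and condvar are still
 * in their freshly initialized state, by comparing them byte-for-byte
 * against their static initializers.
 */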
bool fdtable_fd_is_initialized(int fd)
{
	struct k_mutex ref_lock;
	struct k_condvar ref_cond;

	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		return false;
	}

	ref_lock = (struct k_mutex)Z_MUTEX_INITIALIZER(fdtable[fd].lock);
	if (memcmp(&ref_lock, &fdtable[fd].lock, sizeof(ref_lock)) != 0) {
		return false;
	}

	ref_cond = (struct k_condvar)Z_CONDVAR_INITIALIZER(fdtable[fd].cond);
	if (memcmp(&ref_cond, &fdtable[fd].cond, sizeof(ref_cond)) != 0) {
		return false;
	}

	return true;
}
#endif /* CONFIG_ZTEST */

void *zvfs_get_fd_obj(int fd, const struct fd_op_vtable *vtable, int err)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];

	if ((vtable != NULL) && (entry->vtable != vtable)) {
		errno = err;
		return NULL;
	}

	return entry->obj;
}

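/* Reverse lookup: linear scan for the descriptor bound to (obj, vtable). */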
static int z_get_fd_by_obj_and_vtable(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (fdtable[fd].obj == obj && fdtable[fd].vtable == vtable) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

bool zvfs_get_obj_lock_and_cond(void *obj, const struct fd_op_vtable *vtable, struct k_mutex **lock,
				struct k_condvar **cond)
{
	int fd;
	struct fd_entry *entry;

	fd = z_get_fd_by_obj_and_vtable(obj, vtable);
	if (_check_fd(fd) < 0) {
		return false;
	}

	entry = &fdtable[fd];

	if (lock) {
		*lock = &entry->lock;
	}

	if (cond) {
		*cond = &entry->cond;
	}

	return true;
}

void *zvfs_get_fd_obj_and_vtable(int fd, const struct fd_op_vtable **vtable,
				 struct k_mutex **lock)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];
	*vtable = entry->vtable;

	if (lock != NULL) {
		*lock = &entry->lock;
	}

	return entry->obj;
}

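/* First half of descriptor setup: atomically claim a free table slot. The
 * entry is marked in use (refcount 1) but carries no object or vtable yet;
 * zvfs_finalize_fd() completes it, and zvfs_free_fd() releases it if the
 * caller's setup fails.
 */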
int zvfs_reserve_fd(void)
{
	int fd;

	(void)k_mutex_lock(&fdtable_lock, K_FOREVER);

	fd = _find_fd_entry();
	if (fd >= 0) {
		/* Mark entry as used, zvfs_finalize_fd() will fill it in. */
		(void)z_fd_ref(fd);
		fdtable[fd].obj = NULL;
		fdtable[fd].vtable = NULL;
		k_mutex_init(&fdtable[fd].lock);
		k_condvar_init(&fdtable[fd].cond);
	}

	k_mutex_unlock(&fdtable_lock);

	return fd;
}

void zvfs_finalize_typed_fd(int fd, void *obj, const struct fd_op_vtable *vtable, uint32_t mode)
{
	/* Assumes fd was already bounds-checked. */
#ifdef CONFIG_USERSPACE
	/* Descriptor context objects are inserted into the table when they
	 * are ready for use. Mark the object as initialized and grant the
	 * caller (and only the caller) access.
	 *
	 * This call is a no-op if obj is invalid or points to something
	 * that is not a kernel object.
	 */
	k_object_recycle(obj);
#endif
	fdtable[fd].obj = obj;
	fdtable[fd].vtable = vtable;
	fdtable[fd].mode = mode;

	/* Let the object know about the lock just in case it needs it
	 * for something. For BSD sockets, the lock is used with condition
	 * variables to avoid keeping the lock for a long period of time.
	 */
	if (vtable && vtable->ioctl) {
		(void)zvfs_fdtable_call_ioctl(vtable, obj, ZFD_IOCTL_SET_LOCK,
					      &fdtable[fd].lock);
	}
}

void zvfs_free_fd(int fd)
{
	/* Assumes fd was already bounds-checked. */
	(void)z_fd_unref(fd);
}

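/* Convenience wrapper: reserve a slot and finalize it in one call. */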
int zvfs_alloc_fd(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	fd = zvfs_reserve_fd();
	if (fd >= 0) {
		zvfs_finalize_fd(fd, obj, vtable);
	}

	return fd;
}

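/* Read through the vtable at the entry's current offset; the offset is
 * advanced only for seekable object types (regular files, directories,
 * block devices, shared memory). zvfs_write() below mirrors this logic.
 */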
ssize_t zvfs_read(int fd, void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);
	res = fdtable[fd].vtable->read_offs(fdtable[fd].obj, buf, sz, fdtable[fd].offset);
	if (res > 0) {
		switch (fdtable[fd].mode & ZVFS_MODE_IFMT) {
		case ZVFS_MODE_IFDIR:
		case ZVFS_MODE_IFBLK:
		case ZVFS_MODE_IFSHM:
		case ZVFS_MODE_IFREG:
			fdtable[fd].offset += res;
			break;
		default:
			break;
		}
	}
	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}

ssize_t zvfs_write(int fd, const void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);
	res = fdtable[fd].vtable->write_offs(fdtable[fd].obj, buf, sz, fdtable[fd].offset);
	if (res > 0) {
		switch (fdtable[fd].mode & ZVFS_MODE_IFMT) {
		case ZVFS_MODE_IFDIR:
		case ZVFS_MODE_IFBLK:
		case ZVFS_MODE_IFSHM:
		case ZVFS_MODE_IFREG:
			fdtable[fd].offset += res;
			break;
		default:
			break;
		}
	}
	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}

int zvfs_close(int fd)
{
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->close(fdtable[fd].obj);

	k_mutex_unlock(&fdtable[fd].lock);

	zvfs_free_fd(fd);

	return res;
}

int zvfs_fstat(int fd, struct stat *buf)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return zvfs_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_STAT, buf);
}

int zvfs_fsync(int fd)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return zvfs_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_FSYNC);
}

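/* Variadic helper: packs its extra arguments into a va_list so they can be
 * passed through the ioctl vmethod, then caches the resulting position in
 * the entry's offset for seekable object types.
 */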
static inline off_t zvfs_lseek_wrap(int fd, int cmd, ...)
{
	off_t res;
	va_list args;

	__ASSERT_NO_MSG(fd < ARRAY_SIZE(fdtable));

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);
	va_start(args, cmd);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args);
	va_end(args);
	if (res >= 0) {
		switch (fdtable[fd].mode & ZVFS_MODE_IFMT) {
		case ZVFS_MODE_IFDIR:
		case ZVFS_MODE_IFBLK:
		case ZVFS_MODE_IFSHM:
		case ZVFS_MODE_IFREG:
			fdtable[fd].offset = res;
			break;
		default:
			break;
		}
	}
	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}

off_t zvfs_lseek(int fd, off_t offset, int whence)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return zvfs_lseek_wrap(fd, ZFD_IOCTL_LSEEK, offset, whence, fdtable[fd].offset);
}

int zvfs_fcntl(int fd, int cmd, va_list args)
{
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	/* The remaining commands are per-fd and are handled by the ioctl
	 * vmethod.
	 */
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args);

	return res;
}

static inline int zvfs_ftruncate_wrap(int fd, int cmd, ...)
{
	int res;
	va_list args;

	__ASSERT_NO_MSG(fd < ARRAY_SIZE(fdtable));

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);
	va_start(args, cmd);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args);
	va_end(args);
	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}

int zvfs_ftruncate(int fd, off_t length)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return zvfs_ftruncate_wrap(fd, ZFD_IOCTL_TRUNCATE, length);
}

int zvfs_ioctl(int fd, unsigned long request, va_list args)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return fdtable[fd].vtable->ioctl(fdtable[fd].obj, request, args);
}


#if defined(CONFIG_POSIX_DEVICE_IO)
/*
 * fd operations for stdin/stdout/stderr
 */

int z_impl_zephyr_write_stdout(const char *buf, int nbytes);

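/* stdin is not wired to an input source here, so reads report EOF. */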
static ssize_t stdinout_read_vmeth(void *obj, void *buffer, size_t count)
{
	return 0;
}

static ssize_t stdinout_write_vmeth(void *obj, const void *buffer, size_t count)
{
#if defined(CONFIG_BOARD_NATIVE_POSIX)
	return zvfs_write(1, buffer, count);
#elif defined(CONFIG_NEWLIB_LIBC) || defined(CONFIG_ARCMWDT_LIBC)
	return z_impl_zephyr_write_stdout(buffer, count);
#else
	return 0;
#endif
}

static int stdinout_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	errno = EINVAL;
	return -1;
}


static const struct fd_op_vtable stdinout_fd_op_vtable = {
	.read = stdinout_read_vmeth,
	.write = stdinout_write_vmeth,
	.ioctl = stdinout_ioctl_vmeth,
};

#endif /* defined(CONFIG_POSIX_DEVICE_IO) */