/*
 * Copyright (c) 2018 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief File descriptor table
 *
 * This file provides a generic file descriptor table implementation, suitable
 * for any I/O object implementing POSIX I/O semantics (i.e. read/write +
 * aux operations).
 */

#include <errno.h>
#include <string.h>

#include <zephyr/posix/fcntl.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/fdtable.h>
#include <zephyr/sys/speculation.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/atomic.h>
struct fd_entry {
	void *obj;                         /* Underlying I/O object */
	const struct fd_op_vtable *vtable; /* Operations implemented by the object */
	atomic_t refcount;                 /* Entry is free when refcount is 0 */
	struct k_mutex lock;               /* Serializes operations on the entry */
	struct k_condvar cond;             /* Used together with lock, e.g. by BSD sockets */
};

#ifdef CONFIG_POSIX_API
static const struct fd_op_vtable stdinout_fd_op_vtable;
#endif

static struct fd_entry fdtable[CONFIG_POSIX_MAX_FDS] = {
#ifdef CONFIG_POSIX_API
	/*
	 * Predefine entries for stdin/stdout/stderr.
	 */
	{
		/* STDIN */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[0].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[0].cond),
	},
	{
		/* STDOUT */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[1].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[1].cond),
	},
	{
		/* STDERR */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1),
		.lock = Z_MUTEX_INITIALIZER(fdtable[2].lock),
		.cond = Z_CONDVAR_INITIALIZER(fdtable[2].cond),
	},
#else
	{
		0
	},
#endif
};

static K_MUTEX_DEFINE(fdtable_lock);

static int z_fd_ref(int fd)
{
	return atomic_inc(&fdtable[fd].refcount) + 1;
}

static int z_fd_unref(int fd)
{
	atomic_val_t old_rc;

	/* The reference count must be checked before decrementing so that it
	 * cannot drop below zero, which would leak the file descriptor. The
	 * loop below performs an atomic decrement only when the refcount is
	 * greater than zero; otherwise the refcount is not written at all.
	 */
	do {
		old_rc = atomic_get(&fdtable[fd].refcount);
		if (!old_rc) {
			return 0;
		}
	} while (!atomic_cas(&fdtable[fd].refcount, old_rc, old_rc - 1));

	if (old_rc != 1) {
		return old_rc - 1;
	}

	fdtable[fd].obj = NULL;
	fdtable[fd].vtable = NULL;

	return 0;
}
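
/* Example (illustrative walk-through): with refcount == 1, a thread calling
 * z_fd_unref() wins the CAS, sees old_rc == 1 and clears the entry; a second
 * concurrent caller either observes refcount == 0 and returns immediately, or
 * loses the CAS, re-reads 0 and returns -- the count never underflows.
 */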

static int _find_fd_entry(void)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (!atomic_get(&fdtable[fd].refcount)) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

static int _check_fd(int fd)
{
	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		errno = EBADF;
		return -1;
	}

	fd = k_array_index_sanitize(fd, ARRAY_SIZE(fdtable));

	if (!atomic_get(&fdtable[fd].refcount)) {
		errno = EBADF;
		return -1;
	}

	return 0;
}
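
/* Note: k_array_index_sanitize() (from <zephyr/sys/speculation.h>) clamps the
 * index even under speculative execution, hardening the refcount lookup above
 * against Spectre-style bounds check bypass.
 */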

#ifdef CONFIG_ZTEST
bool fdtable_fd_is_initialized(int fd)
{
	struct k_mutex ref_lock;
	struct k_condvar ref_cond;

	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		return false;
	}

	ref_lock = (struct k_mutex)Z_MUTEX_INITIALIZER(fdtable[fd].lock);
	if (memcmp(&ref_lock, &fdtable[fd].lock, sizeof(ref_lock)) != 0) {
		return false;
	}

	ref_cond = (struct k_condvar)Z_CONDVAR_INITIALIZER(fdtable[fd].cond);
	if (memcmp(&ref_cond, &fdtable[fd].cond, sizeof(ref_cond)) != 0) {
		return false;
	}

	return true;
}
#endif /* CONFIG_ZTEST */

void *z_get_fd_obj(int fd, const struct fd_op_vtable *vtable, int err)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];

	if (vtable != NULL && entry->vtable != vtable) {
		errno = err;
		return NULL;
	}

	return entry->obj;
}
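
/* Example (illustrative sketch, not part of this file): a subsystem storing
 * its own context objects in the table can look one up while rejecting
 * descriptors that belong to another subsystem; "my_vtable" and "struct
 * my_ctx" are hypothetical names:
 *
 *   struct my_ctx *ctx = z_get_fd_obj(fd, &my_vtable, EBADF);
 *
 *   if (ctx == NULL) {
 *       return -1;  // errno set to EBADF (bad fd, or fd with foreign vtable)
 *   }
 */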

static int z_get_fd_by_obj_and_vtable(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (fdtable[fd].obj == obj && fdtable[fd].vtable == vtable) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

bool z_get_obj_lock_and_cond(void *obj, const struct fd_op_vtable *vtable, struct k_mutex **lock,
			     struct k_condvar **cond)
{
	int fd;
	struct fd_entry *entry;

	fd = z_get_fd_by_obj_and_vtable(obj, vtable);
	if (_check_fd(fd) < 0) {
		return false;
	}

	entry = &fdtable[fd];

	if (lock) {
		*lock = &entry->lock;
	}

	if (cond) {
		*cond = &entry->cond;
	}

	return true;
}
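
/* Example (illustrative sketch): an object implementation can borrow the
 * table's lock/condvar pair to wait for a state change, in the style of the
 * BSD sockets code; "obj" and "my_vtable" are hypothetical:
 *
 *   struct k_mutex *lock;
 *   struct k_condvar *cond;
 *
 *   if (z_get_obj_lock_and_cond(obj, &my_vtable, &lock, &cond)) {
 *       (void)k_mutex_lock(lock, K_FOREVER);
 *       (void)k_condvar_wait(cond, lock, K_MSEC(100));
 *       k_mutex_unlock(lock);
 *   }
 */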

void *z_get_fd_obj_and_vtable(int fd, const struct fd_op_vtable **vtable,
			      struct k_mutex **lock)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];
	*vtable = entry->vtable;

	if (lock) {
		*lock = &entry->lock;
	}

	return entry->obj;
}

int z_reserve_fd(void)
{
	int fd;

	(void)k_mutex_lock(&fdtable_lock, K_FOREVER);

	fd = _find_fd_entry();
	if (fd >= 0) {
		/* Mark entry as used; z_finalize_fd() will fill it in. */
		(void)z_fd_ref(fd);
		fdtable[fd].obj = NULL;
		fdtable[fd].vtable = NULL;
		k_mutex_init(&fdtable[fd].lock);
		k_condvar_init(&fdtable[fd].cond);
	}

	k_mutex_unlock(&fdtable_lock);

	return fd;
}

void z_finalize_fd(int fd, void *obj, const struct fd_op_vtable *vtable)
{
	/* Assumes fd was already bounds-checked. */
#ifdef CONFIG_USERSPACE
	/* Descriptor context objects are inserted into the table when they
	 * are ready for use. Mark the object as initialized and grant the
	 * caller (and only the caller) access.
	 *
	 * This call is a no-op if obj is invalid or points to something
	 * that is not a kernel object.
	 */
	k_object_recycle(obj);
#endif
	fdtable[fd].obj = obj;
	fdtable[fd].vtable = vtable;

	/* Let the object know about the lock just in case it needs it
	 * for something. For BSD sockets, the lock is used with condition
	 * variables to avoid keeping the lock for a long period of time.
	 */
	if (vtable && vtable->ioctl) {
		(void)z_fdtable_call_ioctl(vtable, obj, ZFD_IOCTL_SET_LOCK,
					   &fdtable[fd].lock);
	}
}
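
/* Example (illustrative sketch): the two-phase reserve/finalize pattern lets
 * a caller back out before the descriptor becomes usable by other threads;
 * "my_vtable" and "my_ctx_alloc()" are hypothetical:
 *
 *   int fd = z_reserve_fd();
 *
 *   if (fd < 0) {
 *       return -1;  // errno set to ENFILE by _find_fd_entry()
 *   }
 *
 *   struct my_ctx *ctx = my_ctx_alloc();
 *
 *   if (ctx == NULL) {
 *       z_free_fd(fd);  // drop the reservation on failure
 *       errno = ENOMEM;
 *       return -1;
 *   }
 *
 *   z_finalize_fd(fd, ctx, &my_vtable);
 *   return fd;
 */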

void z_free_fd(int fd)
{
	/* Assumes fd was already bounds-checked. */
	(void)z_fd_unref(fd);
}

int z_alloc_fd(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	fd = z_reserve_fd();
	if (fd >= 0) {
		z_finalize_fd(fd, obj, vtable);
	}

	return fd;
}
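
/* Example (illustrative sketch): when the object already exists, this one-shot
 * helper replaces the reserve/finalize pair above; "obj" and "my_vtable" are
 * hypothetical:
 *
 *   int fd = z_alloc_fd(obj, &my_vtable);
 *
 *   if (fd < 0) {
 *       return -1;  // errno set to ENFILE
 *   }
 */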

#ifdef CONFIG_POSIX_API

ssize_t read(int fd, void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->read(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}
FUNC_ALIAS(read, _read, ssize_t);

ssize_t write(int fd, const void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->write(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}
FUNC_ALIAS(write, _write, ssize_t);

int close(int fd)
{
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->close(fdtable[fd].obj);

	k_mutex_unlock(&fdtable[fd].lock);

	z_free_fd(fd);

	return res;
}
FUNC_ALIAS(close, _close, int);

int fsync(int fd)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_FSYNC);
}

off_t lseek(int fd, off_t offset, int whence)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_LSEEK,
				    offset, whence);
}
FUNC_ALIAS(lseek, _lseek, off_t);

int ioctl(int fd, unsigned long request, ...)
{
	va_list args;
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	va_start(args, request);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, request, args);
	va_end(args);

	return res;
}

int fcntl(int fd, int cmd, ...)
{
	va_list args;
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	/* Handle fdtable commands. */
	if (cmd == F_DUPFD) {
		/* Not implemented so far. */
		errno = EINVAL;
		return -1;
	}
	/* The remaining commands are per-fd and are handled by the ioctl vmethod. */
	va_start(args, cmd);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args);
	va_end(args);

	return res;
}
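
/* Example (illustrative sketch): since commands other than F_DUPFD are
 * forwarded to the object's ioctl vmethod, whether e.g. F_SETFL works depends
 * on the object; Zephyr's BSD sockets handle it, enabling non-blocking mode:
 *
 *   if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
 *       // the object's ioctl vmethod rejected the command (errno set)
 *   }
 */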

/*
 * fd operations for stdin/stdout/stderr
 */

int z_impl_zephyr_write_stdout(const char *buf, int nbytes);

static ssize_t stdinout_read_vmeth(void *obj, void *buffer, size_t count)
{
	return 0;
}

static ssize_t stdinout_write_vmeth(void *obj, const void *buffer, size_t count)
{
#if defined(CONFIG_BOARD_NATIVE_POSIX)
	return write(1, buffer, count);
#elif defined(CONFIG_NEWLIB_LIBC) || defined(CONFIG_ARCMWDT_LIBC)
	return z_impl_zephyr_write_stdout(buffer, count);
#else
	return 0;
#endif
}

static int stdinout_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	errno = EINVAL;
	return -1;
}

static const struct fd_op_vtable stdinout_fd_op_vtable = {
	.read = stdinout_read_vmeth,
	.write = stdinout_write_vmeth,
	.ioctl = stdinout_ioctl_vmeth,
};
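
/* Example (illustrative sketch): a minimal custom vtable in the same style,
 * backing an fd with a hypothetical "struct my_dev"; the names below are
 * assumptions, not part of this file:
 *
 *   static ssize_t my_read_vmeth(void *obj, void *buffer, size_t count)
 *   {
 *       struct my_dev *dev = obj;
 *
 *       return my_dev_read(dev, buffer, count);  // hypothetical driver call
 *   }
 *
 *   static const struct fd_op_vtable my_fd_op_vtable = {
 *       .read = my_read_vmeth,
 *   };
 */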

#endif /* CONFIG_POSIX_API */