/*
 * Copyright (c) 2018 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief File descriptor table
 *
 * This file provides a generic file descriptor table implementation, suitable
 * for any I/O object implementing POSIX I/O semantics (i.e. read/write +
 * aux operations).
 */

#include <errno.h>
#include <fcntl.h>
#include <kernel.h>
#include <sys/fdtable.h>
#include <sys/speculation.h>
#include <syscall_handler.h>
#include <sys/atomic.h>

struct fd_entry {
	void *obj;
	const struct fd_op_vtable *vtable;
	atomic_t refcount;
	struct k_mutex lock;
};
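
/*
 * Example (a hypothetical sketch, not used by this file): an I/O object
 * plugs into this table by supplying a struct fd_op_vtable whose methods
 * receive the object pointer stored in the entry. The my_* names below are
 * illustrative only; see stdinout_fd_op_vtable at the end of this file for
 * a real instance.
 *
 *	static ssize_t my_read(void *obj, void *buffer, size_t count)
 *	{
 *		return 0;
 *	}
 *
 *	static ssize_t my_write(void *obj, const void *buffer, size_t count)
 *	{
 *		return count;
 *	}
 *
 *	static int my_ioctl(void *obj, unsigned int request, va_list args)
 *	{
 *		errno = EINVAL;
 *		return -1;
 *	}
 *
 *	static const struct fd_op_vtable my_fd_op_vtable = {
 *		.read = my_read,
 *		.write = my_write,
 *		.ioctl = my_ioctl,
 *	};
 */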

#ifdef CONFIG_POSIX_API
static const struct fd_op_vtable stdinout_fd_op_vtable;
#endif

static struct fd_entry fdtable[CONFIG_POSIX_MAX_FDS] = {
#ifdef CONFIG_POSIX_API
	/*
	 * Predefine entries for stdin/stdout/stderr.
	 */
	{
		/* STDIN */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1)
	},
	{
		/* STDOUT */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1)
	},
	{
		/* STDERR */
		.vtable = &stdinout_fd_op_vtable,
		.refcount = ATOMIC_INIT(1)
	},
#endif
};

static K_MUTEX_DEFINE(fdtable_lock);

static int z_fd_ref(int fd)
{
	return atomic_inc(&fdtable[fd].refcount) + 1;
}

static int z_fd_unref(int fd)
{
	atomic_val_t old_rc;

	/* The reference count must be checked before decrementing to avoid
	 * dropping it below zero, which would leak the file descriptor. The
	 * loop below performs an atomic decrement only if the refcount is
	 * greater than zero; otherwise the refcount is left unmodified.
	 */
	do {
		old_rc = atomic_get(&fdtable[fd].refcount);
		if (!old_rc) {
			return 0;
		}
	} while (!atomic_cas(&fdtable[fd].refcount, old_rc, old_rc - 1));

	if (old_rc != 1) {
		return old_rc - 1;
	}

	fdtable[fd].obj = NULL;
	fdtable[fd].vtable = NULL;

	return 0;
}

static int _find_fd_entry(void)
{
	int fd;

	for (fd = 0; fd < ARRAY_SIZE(fdtable); fd++) {
		if (!atomic_get(&fdtable[fd].refcount)) {
			return fd;
		}
	}

	errno = ENFILE;
	return -1;
}

static int _check_fd(int fd)
{
	if (fd < 0 || fd >= ARRAY_SIZE(fdtable)) {
		errno = EBADF;
		return -1;
	}

	/* Prevent speculative out-of-bounds access after the check above. */
	fd = k_array_index_sanitize(fd, ARRAY_SIZE(fdtable));

	if (!atomic_get(&fdtable[fd].refcount)) {
		errno = EBADF;
		return -1;
	}

	return 0;
}

void *z_get_fd_obj(int fd, const struct fd_op_vtable *vtable, int err)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];

	if (vtable != NULL && entry->vtable != vtable) {
		errno = err;
		return NULL;
	}

	return entry->obj;
}
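
/*
 * Example (hypothetical): a subsystem that owns my_fd_op_vtable can map an
 * fd back to its own object, rejecting fds belonging to other vtables with
 * the errno passed as the third argument:
 *
 *	struct my_obj *obj = z_get_fd_obj(fd, &my_fd_op_vtable, EBADF);
 *
 *	if (obj == NULL) {
 *		return -1;
 *	}
 *
 * On the NULL path, errno has already been set: EBADF for an invalid fd,
 * or the supplied value for a vtable mismatch.
 */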

void *z_get_fd_obj_and_vtable(int fd, const struct fd_op_vtable **vtable,
			      struct k_mutex **lock)
{
	struct fd_entry *entry;

	if (_check_fd(fd) < 0) {
		return NULL;
	}

	entry = &fdtable[fd];
	*vtable = entry->vtable;

	if (lock) {
		*lock = &entry->lock;
	}

	return entry->obj;
}

int z_reserve_fd(void)
{
	int fd;

	(void)k_mutex_lock(&fdtable_lock, K_FOREVER);

	fd = _find_fd_entry();
	if (fd >= 0) {
		/* Mark entry as used; z_finalize_fd() will fill it in. */
		(void)z_fd_ref(fd);
		fdtable[fd].obj = NULL;
		fdtable[fd].vtable = NULL;
		k_mutex_init(&fdtable[fd].lock);
	}

	k_mutex_unlock(&fdtable_lock);

	return fd;
}

void z_finalize_fd(int fd, void *obj, const struct fd_op_vtable *vtable)
{
	/* Assumes fd was already bounds-checked. */
#ifdef CONFIG_USERSPACE
	/* Descriptor context objects are inserted into the table when they
	 * are ready for use. Mark the object as initialized and grant the
	 * caller (and only the caller) access.
	 *
	 * This call is a no-op if obj is invalid or points to something
	 * that is not a kernel object.
	 */
	z_object_recycle(obj);
#endif
	fdtable[fd].obj = obj;
	fdtable[fd].vtable = vtable;

	/* Let the object know about the lock in case it needs it for
	 * something. For BSD sockets, the lock is used with condition
	 * variables to avoid holding it for long periods of time.
	 */
	if (vtable && vtable->ioctl) {
		(void)z_fdtable_call_ioctl(vtable, obj, ZFD_IOCTL_SET_LOCK,
					   &fdtable[fd].lock);
	}
}
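
/*
 * Example (hypothetical): an implementation that wants the per-fd lock can
 * accept the ZFD_IOCTL_SET_LOCK handover above in its ioctl vmethod. The
 * my_obj type and its lock member are illustrative only.
 *
 *	static int my_ioctl(void *obj, unsigned int request, va_list args)
 *	{
 *		struct my_obj *o = obj;
 *
 *		switch (request) {
 *		case ZFD_IOCTL_SET_LOCK:
 *			o->lock = va_arg(args, struct k_mutex *);
 *			return 0;
 *		default:
 *			errno = EOPNOTSUPP;
 *			return -1;
 *		}
 *	}
 */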

void z_free_fd(int fd)
{
	/* Assumes fd was already bounds-checked. */
	(void)z_fd_unref(fd);
}

int z_alloc_fd(void *obj, const struct fd_op_vtable *vtable)
{
	int fd;

	fd = z_reserve_fd();
	if (fd >= 0) {
		z_finalize_fd(fd, obj, vtable);
	}

	return fd;
}
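
/*
 * Example (hypothetical): z_alloc_fd() above is the one-shot form of the
 * reserve/finalize pair. Callers use the two-step sequence when the fd
 * number must exist before the backing object is fully constructed:
 *
 *	int fd = z_reserve_fd();
 *
 *	if (fd < 0) {
 *		return -1;
 *	}
 *	obj = construct_object();
 *	z_finalize_fd(fd, obj, &my_fd_op_vtable);
 *
 * On the error path errno is ENFILE, set by _find_fd_entry(). If object
 * construction fails after the reservation, the slot is released again
 * with z_free_fd(fd). The construct_object() helper is illustrative only.
 */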

#ifdef CONFIG_POSIX_API

ssize_t read(int fd, void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->read(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}
FUNC_ALIAS(read, _read, ssize_t);

ssize_t write(int fd, const void *buf, size_t sz)
{
	ssize_t res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->write(fdtable[fd].obj, buf, sz);

	k_mutex_unlock(&fdtable[fd].lock);

	return res;
}
FUNC_ALIAS(write, _write, ssize_t);

int close(int fd)
{
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	(void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER);

	res = fdtable[fd].vtable->close(fdtable[fd].obj);

	k_mutex_unlock(&fdtable[fd].lock);

	z_free_fd(fd);

	return res;
}
FUNC_ALIAS(close, _close, int);

int fsync(int fd)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_FSYNC);
}

off_t lseek(int fd, off_t offset, int whence)
{
	if (_check_fd(fd) < 0) {
		return -1;
	}

	return z_fdtable_call_ioctl(fdtable[fd].vtable, fdtable[fd].obj, ZFD_IOCTL_LSEEK,
			  offset, whence);
}
FUNC_ALIAS(lseek, _lseek, off_t);

int ioctl(int fd, unsigned long request, ...)
{
	va_list args;
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	va_start(args, request);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, request, args);
	va_end(args);

	return res;
}

int fcntl(int fd, int cmd, ...)
{
	va_list args;
	int res;

	if (_check_fd(fd) < 0) {
		return -1;
	}

	/* Handle fdtable commands. */
	if (cmd == F_DUPFD) {
		/* Not implemented yet. */
		errno = EINVAL;
		return -1;
	}

	/* The remaining commands are per-fd, handled by the ioctl vmethod. */
	va_start(args, cmd);
	res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args);
	va_end(args);

	return res;
}

/*
 * fd operations for stdin/stdout/stderr
 */

int z_impl_zephyr_write_stdout(const char *buf, int nbytes);

static ssize_t stdinout_read_vmeth(void *obj, void *buffer, size_t count)
{
	/* No console input path is wired up here; report EOF. */
	return 0;
}

static ssize_t stdinout_write_vmeth(void *obj, const void *buffer, size_t count)
{
#if defined(CONFIG_BOARD_NATIVE_POSIX)
	return write(1, buffer, count);
#elif defined(CONFIG_NEWLIB_LIBC) || defined(CONFIG_ARCMWDT_LIBC)
	return z_impl_zephyr_write_stdout(buffer, count);
#else
	return 0;
#endif
}

static int stdinout_ioctl_vmeth(void *obj, unsigned int request, va_list args)
{
	errno = EINVAL;
	return -1;
}

static const struct fd_op_vtable stdinout_fd_op_vtable = {
	.read = stdinout_read_vmeth,
	.write = stdinout_write_vmeth,
	.ioctl = stdinout_ioctl_vmeth,
};

#endif /* CONFIG_POSIX_API */