Lines Matching +full:lock +full:- +full:mode

5  * SPDX-License-Identifier: Apache-2.0
34 struct k_mutex lock; member
37 uint32_t mode; member
55 .lock = Z_MUTEX_INITIALIZER(fdtable[0].lock),
62 .lock = Z_MUTEX_INITIALIZER(fdtable[1].lock),
69 .lock = Z_MUTEX_INITIALIZER(fdtable[2].lock),
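
The two "member" hits at lines 34 and 37 belong to the table's per-descriptor entry, and lines 55-69 statically initialize the locks of the first three entries (stdin, stdout, stderr). Below is a minimal sketch of that entry, reconstructed only from the fields visible in these matches; field order and anything not shown here (e.g. the per-fd offset that zvfs_rw() uses) are assumptions, not the authoritative definition in fdtable.c.

    #include <stdint.h>
    #include <zephyr/kernel.h>
    #include <zephyr/sys/fdtable.h>

    /* Sketch only: reconstructed from the matched fragments. */
    struct fd_entry_sketch {
        void *obj;                          /* backing object (socket, file, ...) */
        const struct fd_op_vtable *vtable;  /* per-type operations */
        atomic_t refcount;                  /* decremented in z_fd_unref() below */
        struct k_mutex lock;                /* line 34: per-fd lock */
        struct k_condvar cond;              /* exposed by zvfs_get_obj_lock_and_cond() */
        uint32_t mode;                      /* line 37: ZVFS_MODE_* type bits */
    };
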
98 } while (!atomic_cas(&fdtable[fd].refcount, old_rc, old_rc - 1)); in z_fd_unref()
101 return old_rc - 1; in z_fd_unref()
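
Lines 98-101 are the tail of a lock-free reference-count drop: z_fd_unref() re-reads the counter and retries atomic_cas() until the decrement lands, then returns the new count so the caller can tear the entry down once it reaches zero. A sketch of that retry pattern, with illustrative names rather than the exact body of z_fd_unref():

    #include <zephyr/sys/atomic.h>

    static int refcount_drop(atomic_t *refcount)
    {
        atomic_val_t old_rc;

        do {
            old_rc = atomic_get(refcount);
            if (old_rc == 0) {
                return 0;   /* nothing left to release */
            }
        } while (!atomic_cas(refcount, old_rc, old_rc - 1));

        return old_rc - 1;  /* new count, as returned at line 101 */
    }
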
121 return -1; in _find_fd_entry()
128 return -1; in _check_fd()
135 return -1; in _check_fd()
151 ref_lock = (struct k_mutex)Z_MUTEX_INITIALIZER(fdtable[fd].lock); in fdtable_fd_is_initialized()
152 if (memcmp(&ref_lock, &fdtable[fd].lock, sizeof(ref_lock)) != 0) { in fdtable_fd_is_initialized()
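
Lines 151-152 show how fdtable_fd_is_initialized() decides whether a slot's mutex is still in its freshly initialized state: it builds the image a just-initialized lock for that slot would have and memcmp()s it against what is actually stored. A hedged sketch of the same check against a stand-in table (the surrounding return-true/false logic is assumed):

    #include <stdbool.h>
    #include <string.h>
    #include <zephyr/kernel.h>

    static struct {
        struct k_mutex lock;
    } entries[4];                            /* stand-in for the real fdtable */

    static bool lock_is_in_initial_state(int fd)
    {
        struct k_mutex ref_lock;

        /* Same shape as line 151: build a reference image for this particular
         * slot, then compare it byte-for-byte with the stored lock (line 152).
         */
        ref_lock = (struct k_mutex)Z_MUTEX_INITIALIZER(entries[fd].lock);

        return memcmp(&ref_lock, &entries[fd].lock, sizeof(ref_lock)) == 0;
    }
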
175 if ((vtable != NULL) && (entry->vtable != vtable)) { in zvfs_get_fd_obj()
180 return entry->obj; in zvfs_get_fd_obj()
194 return -1; in z_get_fd_by_obj_and_vtable()
197 bool zvfs_get_obj_lock_and_cond(void *obj, const struct fd_op_vtable *vtable, struct k_mutex **lock, in zvfs_get_obj_lock_and_cond() argument
210 if (lock) { in zvfs_get_obj_lock_and_cond()
211 *lock = &entry->lock; in zvfs_get_obj_lock_and_cond()
215 *cond = &entry->cond; in zvfs_get_obj_lock_and_cond()
222 struct k_mutex **lock) in zvfs_get_fd_obj_and_vtable() argument
231 *vtable = entry->vtable; in zvfs_get_fd_obj_and_vtable()
233 if (lock != NULL) { in zvfs_get_fd_obj_and_vtable()
234 *lock = &entry->lock; in zvfs_get_fd_obj_and_vtable()
237 return entry->obj; in zvfs_get_fd_obj_and_vtable()
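
Lines 175-237 cover the lookup helpers: zvfs_get_fd_obj() returns the backing object only when the caller's expected vtable matches (or no vtable is given), z_get_fd_by_obj_and_vtable() maps an object back to its fd, and zvfs_get_fd_obj_and_vtable() hands back object, vtable and the per-fd lock in one call. A hedged usage sketch of the latter; error handling is kept minimal and errno is set by the lookup on failure:

    #include <zephyr/kernel.h>
    #include <zephyr/sys/fdtable.h>

    static int with_fd_locked(int fd)
    {
        const struct fd_op_vtable *vtable;
        struct k_mutex *lock;
        void *obj;

        obj = zvfs_get_fd_obj_and_vtable(fd, &vtable, &lock);
        if (obj == NULL) {
            return -1;                       /* bad fd; errno already set */
        }

        (void)k_mutex_lock(lock, K_FOREVER);
        /* ... operate on obj through vtable while holding the per-fd lock ... */
        k_mutex_unlock(lock);

        return 0;
    }
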
252 k_mutex_init(&fdtable[fd].lock); in zvfs_reserve_fd()
261 void zvfs_finalize_typed_fd(int fd, void *obj, const struct fd_op_vtable *vtable, uint32_t mode) in zvfs_finalize_typed_fd() argument
263 /* Assumes fd was already bounds-checked. */ in zvfs_finalize_typed_fd()
269 * This call is a no-op if obj is invalid or points to something in zvfs_finalize_typed_fd()
276 fdtable[fd].mode = mode; in zvfs_finalize_typed_fd()
278 /* Let the object know about the lock just in case it needs it in zvfs_finalize_typed_fd()
279 * for something. For BSD sockets, the lock is used with condition in zvfs_finalize_typed_fd()
280 * variables to avoid keeping the lock for a long period of time. in zvfs_finalize_typed_fd()
282 if (vtable && vtable->ioctl) { in zvfs_finalize_typed_fd()
284 &fdtable[fd].lock); in zvfs_finalize_typed_fd()
290 /* Assumes fd was already bounds-checked. */ in zvfs_free_fd()
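
Lines 252-290 sketch the descriptor lifecycle: zvfs_reserve_fd() grabs a slot and k_mutex_init()s its lock, zvfs_finalize_typed_fd() binds the object, vtable and ZVFS_MODE_* type bits (and, per lines 278-284, hands the per-fd lock to the object through its ioctl vmethod), and zvfs_free_fd() releases the slot again. A hedged sketch of that sequence; the constructor, vtable and the ZVFS_MODE_IFIFO type chosen here are placeholders, not symbols from fdtable.c:

    #include <zephyr/sys/fdtable.h>

    extern const struct fd_op_vtable my_vtable;  /* hypothetical vtable */
    extern void *create_my_object(void);         /* hypothetical constructor */

    static int open_my_object(void)
    {
        int fd = zvfs_reserve_fd();

        if (fd < 0) {
            return -1;                 /* no free entry; errno set by the call */
        }

        void *obj = create_my_object();
        if (obj == NULL) {
            zvfs_free_fd(fd);          /* give the reserved slot back */
            return -1;
        }

        /* Binds obj + vtable + type bits; also forwards the per-fd lock to
         * the object via the ioctl vmethod, as lines 278-284 describe.
         */
        zvfs_finalize_typed_fd(fd, obj, &my_vtable, ZVFS_MODE_IFIFO);

        return fd;
    }
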
306 static bool supports_pread_pwrite(uint32_t mode) in supports_pread_pwrite() argument
308 switch (mode & ZVFS_MODE_IFMT) { in supports_pread_pwrite()
323 return -1; in zvfs_rw()
326 (void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER); in zvfs_rw()
328 prw = supports_pread_pwrite(fdtable[fd].mode); in zvfs_rw()
331 * Seekable file types should support pread() / pwrite() and per-fd offset passing. in zvfs_rw()
335 res = -1; in zvfs_rw()
343 if (fdtable[fd].vtable->write_offs == NULL) { in zvfs_rw()
344 res = -1; in zvfs_rw()
347 res = fdtable[fd].vtable->write_offs(fdtable[fd].obj, buf, sz, *off); in zvfs_rw()
350 if (fdtable[fd].vtable->read_offs == NULL) { in zvfs_rw()
351 res = -1; in zvfs_rw()
354 res = fdtable[fd].vtable->read_offs(fdtable[fd].obj, buf, sz, *off); in zvfs_rw()
366 k_mutex_unlock(&fdtable[fd].lock); in zvfs_rw()
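
Lines 306-366 show zvfs_rw() taking the per-fd lock, using the mode bits to decide whether the file type supports pread()/pwrite() (supports_pread_pwrite()), and dispatching to the vtable's read_offs/write_offs handlers when a position is supplied. A hedged sketch of what such offset-based handlers can look like for a small RAM-backed object; the object and its bounds handling are illustrative only:

    #include <stdint.h>
    #include <string.h>
    #include <sys/types.h>
    #include <zephyr/sys/util.h>

    struct ram_file {
        uint8_t data[256];
        size_t size;
    };

    /* Shape matches the calls at lines 347 and 354: (obj, buf, sz, offset). */
    static ssize_t ram_read_offs(void *obj, void *buf, size_t sz, size_t offset)
    {
        struct ram_file *f = obj;

        if (offset >= f->size) {
            return 0;                            /* past EOF: nothing to read */
        }
        sz = MIN(sz, f->size - offset);
        memcpy(buf, &f->data[offset], sz);

        return sz;
    }

    static ssize_t ram_write_offs(void *obj, const void *buf, size_t sz, size_t offset)
    {
        struct ram_file *f = obj;

        if (offset >= sizeof(f->data)) {
            return -1;                           /* out of room; set errno in real code */
        }
        sz = MIN(sz, sizeof(f->data) - offset);
        memcpy(&f->data[offset], buf, sz);
        f->size = MAX(f->size, offset + sz);

        return sz;
    }
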
386 return -1; in zvfs_close()
389 (void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER); in zvfs_close()
390 if (fdtable[fd].vtable->close != NULL) { in zvfs_close()
391 /* close() is optional - e.g. stdinout_fd_op_vtable */ in zvfs_close()
392 if (fdtable[fd].mode & ZVFS_MODE_IFSOCK) { in zvfs_close()
396 res = fdtable[fd].vtable->close2(fdtable[fd].obj, fd); in zvfs_close()
398 res = fdtable[fd].vtable->close(fdtable[fd].obj); in zvfs_close()
401 k_mutex_unlock(&fdtable[fd].lock); in zvfs_close()
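
Lines 386-401 show zvfs_close() working under the per-fd lock: a close handler in the vtable is optional (the stdin/stdout/stderr vtable has none, per the comment at line 391), and socket-type entries (ZVFS_MODE_IFSOCK) are closed through close2() so the implementation also receives the fd number. A hedged sketch of the two handler shapes selected at lines 396-398:

    /* Plain close(): used for any non-socket entry that provides one. */
    static int my_close(void *obj)
    {
        /* release whatever obj owns; return 0, or -1 with errno set */
        return 0;
    }

    /* close2(): the socket flavour at line 396, which also gets the fd value. */
    static int my_close2(void *obj, int fd)
    {
        (void)fd;

        return my_close(obj);
    }
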
408 FILE *zvfs_fdopen(int fd, const char *mode) in zvfs_fdopen() argument
410 ARG_UNUSED(mode); in zvfs_fdopen()
423 return -1; in zvfs_fileno()
426 return (struct fd_entry *)file - fdtable; in zvfs_fileno()
432 return -1; in zvfs_fstat()
441 return -1; in zvfs_fsync()
454 (void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER); in zvfs_lseek_wrap()
456 res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args); in zvfs_lseek_wrap()
459 switch (fdtable[fd].mode & ZVFS_MODE_IFMT) { in zvfs_lseek_wrap()
470 k_mutex_unlock(&fdtable[fd].lock); in zvfs_lseek_wrap()
478 return -1; in zvfs_lseek()
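
Lines 454-478 and 505-517 show that lseek() and ftruncate() have no dedicated vmethods: the wrappers take the per-fd lock and funnel a request code into the vtable's ioctl handler, and for the seekable, non-socket types (the mode switch at line 459) the seek result is then kept as the per-fd offset that zvfs_rw() consumes. A hedged sketch of an ioctl vmethod answering the seek request; ZFD_IOCTL_LSEEK is the request code from fdtable.h, but the va_arg packing (offset, then whence) and the position bookkeeping here are assumptions for illustration:

    #include <errno.h>
    #include <stdarg.h>
    #include <stdio.h>                  /* SEEK_SET / SEEK_CUR / SEEK_END */
    #include <sys/types.h>
    #include <zephyr/sys/fdtable.h>

    struct seekable_obj {
        size_t pos;
        size_t size;
    };

    static int my_ioctl_vmeth(void *obj, unsigned int request, va_list args)
    {
        struct seekable_obj *o = obj;

        switch (request) {
        case ZFD_IOCTL_LSEEK: {
            off_t offset = va_arg(args, off_t);   /* assumed argument order */
            int whence = va_arg(args, int);
            off_t base = (whence == SEEK_SET) ? 0 :
                         (whence == SEEK_CUR) ? (off_t)o->pos : (off_t)o->size;

            if (base + offset < 0) {
                errno = EINVAL;
                return -1;
            }
            o->pos = base + offset;

            return (int)o->pos;        /* new position, recorded by the wrapper */
        }
        default:
            errno = EOPNOTSUPP;
            return -1;
        }
    }
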
489 return -1; in zvfs_fcntl()
492 /* The rest of commands are per-fd, handled by ioctl vmethod. */ in zvfs_fcntl()
493 res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args); in zvfs_fcntl()
505 (void)k_mutex_lock(&fdtable[fd].lock, K_FOREVER); in zvfs_ftruncate_wrap()
507 res = fdtable[fd].vtable->ioctl(fdtable[fd].obj, cmd, args); in zvfs_ftruncate_wrap()
509 k_mutex_unlock(&fdtable[fd].lock); in zvfs_ftruncate_wrap()
517 return -1; in zvfs_ftruncate()
526 return -1; in zvfs_ioctl()
529 return fdtable[fd].vtable->ioctl(fdtable[fd].obj, request, args); in zvfs_ioctl()
559 return -1; in stdinout_ioctl_vmeth()