// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/file.c - kernfs file implementation
 * ...
 * Copyright (c) 2001-3 Patrick Mochel
 * ...
 */

#include <linux/poll.h>
/* ... */
#include "kernfs-internal.h"

/*
 * ...
 * kernfs_node->attr.open points to kernfs_open_node. attr.open is
 * ...
 * filp->private_data points to seq_file whose ->private points to
 * ...
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 * ...
 */

	wait_queue_head_t	poll;	/* member of struct kernfs_open_node */

/* in kernfs_of(): */
	return ((struct seq_file *)file->private_data)->private;

/* in kernfs_ops(): */
	if (kn->flags & KERNFS_LOCKDEP)
		/* ... */
	return kn->attr.ops;
/*
 * ...
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * ...
 * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * ...
 * should be performed or not only on ERR_PTR(-ENODEV).
 * ...
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * ...
 */

/* in kernfs_seq_stop_active(): */
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
/* in kernfs_seq_start(): */
	struct kernfs_open_file *of = sf->private;
	/* ... */
	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * ...
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* ... */
		if (next == ERR_PTR(-ENODEV))
			/* ... */

/* in kernfs_seq_next(): */
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);
		/* ... */
		if (next == ERR_PTR(-ENODEV))
			/* ... */

/* in kernfs_seq_stop(): */
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		/* ... */
	mutex_unlock(&of->mutex);

/* in kernfs_seq_show(): */
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
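/*
 * Illustrative sketch (not part of the original file): a minimal
 * kernfs_ops->seq_show() callback of the kind kernfs_seq_show() above
 * dispatches to.  The callback name and the string stored in kn->priv are
 * assumptions for the example; sf->private carries the kernfs_open_file,
 * exactly as the kernfs code above relies on.
 */
static int example_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	/* emit whatever was stashed in the node's private pointer */
	seq_printf(sf, "%s\n", (const char *)of->kn->priv);
	return 0;
}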
/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * ...
 */

/* in kernfs_file_direct_read(..., char __user *user_buf, size_t count, ...): */
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	/* ... */
	buf = of->prealloc_buf;
	/* ... */
		mutex_lock(&of->prealloc_mutex);
	/* ... */
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is used both to ensure that
	 * ...
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		/* ... */
	}

	of->event = atomic_read(&of->kn->attr.open->event);

	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	/* ... */
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
	/* ... */
		len = -EFAULT;
	/* ... */
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
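/*
 * Illustrative sketch (not part of the original file): a kernfs_ops->read()
 * callback matching the ops->read(of, buf, len, *ppos) dispatch above.  This
 * is the non-seq_file path used when ->seq_show is not provided.  The
 * callback name and the string in kn->priv are assumptions; @buf is the
 * kernel buffer kernfs prepared above.
 */
static ssize_t example_read(struct kernfs_open_file *of, char *buf,
			    size_t bytes, loff_t off)
{
	/* a single short record: report EOF for any follow-up read */
	if (off)
		return 0;
	return scnprintf(buf, bytes, "%s\n", (const char *)of->kn->priv);
}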
/*
 * kernfs_fop_read - kernfs vfs read callback
 * ...
 * @count: number of bytes
 * ...
 */

/* in kernfs_fop_read(..., size_t count, loff_t *ppos): */
	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	/* ... */
	return kernfs_file_direct_read(of, user_buf, count, ppos);
/*
 * kernfs_fop_write - kernfs vfs write callback
 * ...
 * @count: number of bytes
 * ...
 */

/* in kernfs_fop_write(..., size_t count, loff_t *ppos): */
	if (of->atomic_write_len) {
		len = count;
		if (len > of->atomic_write_len)
			return -E2BIG;
	/* ... */
		len = min_t(size_t, count, PAGE_SIZE);
	/* ... */

	buf = of->prealloc_buf;
	/* ... */
		mutex_lock(&of->prealloc_mutex);
	/* ... */
		return -ENOMEM;
	/* ... */
		len = -EFAULT;
	/* ... */

	/*
	 * @of->mutex nests outside active ref and is used both to ensure that
	 * ...
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		/* ... */
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	/* ... */
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
	/* ... */
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
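/*
 * Illustrative sketch (not part of the original file): a kernfs_ops->write()
 * callback matching the ops->write(of, buf, len, *ppos) dispatch above.  The
 * callback name and the integer attribute it parses are assumptions; @buf is
 * the kernel copy of the user data prepared by kernfs_fop_write() (mainline
 * kernfs NUL-terminates it, which is what the len + 1 allocations are for).
 */
static ssize_t example_write(struct kernfs_open_file *of, char *buf,
			     size_t bytes, loff_t off)
{
	int val, ret;

	ret = kstrtoint(strim(buf), 0, &val);
	if (ret < 0)
		return ret;
	/* ... act on val ... */
	return bytes;	/* consume the whole write */
}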
/* in kernfs_vma_open(): */
	struct file *file = vma->vm_file;
	/* ... */
	if (!of->vm_ops)
		/* ... */
	if (!kernfs_get_active(of->kn))
		/* ... */
	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);

/* in kernfs_vma_fault(): */
	struct file *file = vmf->vma->vm_file;
	/* ... */
	if (!of->vm_ops)
		/* ... */
	if (!kernfs_get_active(of->kn))
		/* ... */
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vmf);

	kernfs_put_active(of->kn);

/* in kernfs_vma_page_mkwrite(): */
	struct file *file = vmf->vma->vm_file;
	/* ... */
	if (!of->vm_ops)
		/* ... */
	if (!kernfs_get_active(of->kn))
		/* ... */
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vmf);
	/* ... */
	kernfs_put_active(of->kn);

/* in kernfs_vma_access(): */
	struct file *file = vma->vm_file;
	/* ... */
	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);

/* in kernfs_vma_set_policy(): */
	struct file *file = vma->vm_file;
	/* ... */
	if (!of->vm_ops)
		/* ... */
	if (!kernfs_get_active(of->kn))
		return -EINVAL;
	/* ... */
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);

/* in kernfs_vma_get_policy(): */
	struct file *file = vma->vm_file;
	/* ... */
	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);

/* in kernfs_fop_mmap(): */
	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * ...
	 * without grabbing @of->mutex by testing HAS_MMAP flag. See the
	 * ...
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		/* ... */

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	/* ... */
	if (vma->vm_file != file)
		/* ... */
	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		/* ... */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		/* ... */

	of->mmapped = true;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
	/* ... */
	kernfs_put_active(of->kn);
	/* ... */
	mutex_unlock(&of->mutex);
/*
 * kernfs_get_open_node - get or create kernfs_open_node
 * ...
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * ...
 * 0 on success, -errno on failure.
 */

/* in kernfs_get_open_node(): */
 retry:
	/* ... */
	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		/* ... */
	}

	on = kn->attr.open;
	/* ... */
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	/* ... */

	/* not there, initialize a new one and retry */
	/* ... */
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;

/*
 * kernfs_put_open_node - put kernfs_open_node
 * ...
 * Put @kn->attr.open and unlink @of from the files list. If
 * reference count reaches zero, disassociate and free it.
 */

/* in kernfs_put_open_node(): */
	struct kernfs_open_node *on = kn->attr.open;
	/* ... */
	list_del(&of->list);
	/* ... */
	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
/* in kernfs_fop_open(): */
	struct kernfs_node *kn = inode->i_private;
	/* ... */
	int error = -EACCES;
	/* ... */
		return -ENODEV;
	/* ... */
	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;
	/* ... */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			/* ... */
		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			/* ... */
	/* ... */

	error = -ENOMEM;
	/* ... */

	/*
	 * ...
	 * @of->mutex for files which implement mmap. This is a rather
	 * ...
	 * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and
	 * ...
	 * which mm->mmap_lock nests, while holding @of->mutex. As each
	 * ...
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	/* ... */
		mutex_init(&of->mutex);
	/* ... */
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;
	/* ... */
	of->atomic_write_len = ops->atomic_write_len;

	error = -EINVAL;
	/*
	 * ...
	 * ->seq_show is incompatible with ->prealloc,
	 * ...
	 * ->read must be used instead.
	 */
	if (ops->prealloc && ops->seq_show)
		/* ... */
	if (ops->prealloc) {
		int len = of->atomic_write_len ?: PAGE_SIZE;
		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
		error = -ENOMEM;
		if (!of->prealloc_buf)
			/* ... */
		mutex_init(&of->prealloc_mutex);
	}
	/* ... */
	if (ops->seq_show)
		/* ... */
	of->seq_file = file->private_data;
	of->seq_file->private = of;
	/* ... */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;
	/* ... */
	if (ops->open) {
		/* nobody has access to @of yet, skip @of->mutex */
		error = ops->open(of);
		/* ... */
	}
	/* ... */
	kfree(of->prealloc_buf);
/* used from release/drain to ensure that ->release() is called exactly once */

/* in kernfs_release_file(): */
	/*
	 * ...
	 * @kernfs_open_file_mutex is enough. @of->mutex can't be used
	 * ...
	 */
	/* ... */
	if (!of->released) {
		/* ... */
		kn->attr.ops->release(of);
		of->released = true;
	}

/* in kernfs_fop_release(): */
	struct kernfs_node *kn = inode->i_private;
	/* ... */
	if (kn->flags & KERNFS_HAS_RELEASE) {
		/* ... */
	}
	/* ... */
	kfree(of->prealloc_buf);

/* in kernfs_drain_open_files(): */
	if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
		/* ... */
	on = kn->attr.open;
	/* ... */
	atomic_inc(&on->refcnt);
	/* ... */
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);

		if (kn->flags & KERNFS_HAS_MMAP)
			unmap_mapping_range(inode->i_mapping, 0, 0, 1);

		if (kn->flags & KERNFS_HAS_RELEASE)
			/* ... */
	}
/*
 * ...
 * the content and then you use 'poll' or 'select' to wait for
 * ...
 * manager for the kobject supports notification), poll will
 * ...
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * ...
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * ...
 */

/* in kernfs_generic_poll(): */
	struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
	struct kernfs_open_node *on = kn->attr.open;

	poll_wait(of->file, &on->poll, wait);

	if (of->event != atomic_read(&on->event))
		/* ... */

/* in kernfs_fop_poll(): */
	struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
	/* ... */
	if (kn->attr.ops->poll)
		ret = kn->attr.ops->poll(of, wait);
	/* ... */
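/*
 * Illustrative userspace sketch (not part of the original file): waiting for
 * a kernfs/sysfs attribute to change, following the procedure the comment
 * above describes (read, poll for an exceptional event, seek to 0, read
 * again).  The attribute path is a hypothetical example and error handling
 * is omitted for brevity.
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int wait_for_attr_change(void)
{
	char buf[128];
	int fd = open("/sys/class/hypothetical/foo/state", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

	read(fd, buf, sizeof(buf));		/* arm the event counter */
	poll(&pfd, 1, -1);			/* sleeps until kernfs_notify() fires */
	lseek(fd, 0, SEEK_SET);			/* seek back to 0 ... */
	return read(fd, buf, sizeof(buf));	/* ... and read the new value */
}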
/* in kernfs_notify_workfn(): */
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	/* ... */
	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		/* ... */
		inode = ilookup(info->sb, kernfs_ino(kn));
		/* ... */
		name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
		/* ... */
		p_inode = ilookup(info->sb, kernfs_ino(parent));
		/* ... */
/*
 * kernfs_notify - notify a kernfs file
 * ...
 * Notify @kn such that poll(2) on @kn wakes up. May be called from any
 * ...
 */

/* in kernfs_notify(): */
	/* kick poll immediately */
	/* ... */
	on = kn->attr.open;
	/* ... */
		atomic_inc(&on->event);
		wake_up_interruptible(&on->poll);
	/* ... */
	if (!kn->attr.notify_next) {
		/* ... */
		kn->attr.notify_next = kernfs_notify_list;
		/* ... */
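/*
 * Illustrative sketch (not part of the original file): how driver code
 * typically reaches the wakeup above.  sysfs_notify() looks up the named
 * attribute's kernfs node and ends up calling kernfs_notify(); the kobject,
 * attribute name and update helper here are assumptions for the example.
 */
static void example_update_state(struct kobject *kobj)
{
	/* ... update the value backing the "state" attribute ... */
	sysfs_notify(kobj, NULL, "state");	/* wakes poll(2)/select(2) waiters */
}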
/* in the kernfs file_operations table: */
	.poll		= kernfs_fop_poll,

/*
 * __kernfs_create_file - kernfs internal function to create a file
 * ...
 */

/* in __kernfs_create_file(): */
	/* ... */
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;
	/* ... */
		lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	/* ... */

	/*
	 * kn->attr.ops is accessible only while holding active ref. We
	 * ...
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;
	if (ops->release)
		kn->flags |= KERNFS_HAS_RELEASE;
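/*
 * Illustrative sketch (not part of the original file): a struct kernfs_ops
 * tying the pieces above together.  Because only .seq_show (and not .mmap or
 * .release) is set, __kernfs_create_file() would mark the node
 * KERNFS_HAS_SEQ_SHOW but not KERNFS_HAS_MMAP/KERNFS_HAS_RELEASE.  The
 * callback names (example_seq_show/example_write, sketched earlier) and the
 * one-page write limit are assumptions for the example.
 */
static const struct kernfs_ops example_ops = {
	.seq_show		= example_seq_show,
	.write			= example_write,
	.atomic_write_len	= PAGE_SIZE,	/* larger writes get -E2BIG */
};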