Lines matching refs:kn (fs/kernfs/file.c)

43 static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn) in kernfs_open_file_mutex_ptr() argument
45 int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS); in kernfs_open_file_mutex_ptr()
50 static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn) in kernfs_open_file_mutex_lock() argument
54 lock = kernfs_open_file_mutex_ptr(kn); in kernfs_open_file_mutex_lock()
67 return rcu_dereference_protected(of->kn->attr.open, in of_on()
87 kernfs_deref_open_node_locked(struct kernfs_node *kn) in kernfs_deref_open_node_locked() argument
89 return rcu_dereference_protected(kn->attr.open, in kernfs_deref_open_node_locked()
90 lockdep_is_held(kernfs_open_file_mutex_ptr(kn))); in kernfs_deref_open_node_locked()
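kn->attr.open is an RCU-protected pointer, and the two accessors referenced above differ only in how they justify dereferencing it: the locked variant proves it holds the node's hashed mutex, while of_on() (line 67) relies on the open file still being linked on the open node's list. A sketch reconstructed from those fragments (anything outside the listed lines is an approximation):

/* caller holds kernfs_open_file_mutex_ptr(kn) */
static struct kernfs_open_node *
kernfs_deref_open_node_locked(struct kernfs_node *kn)
{
	return rcu_dereference_protected(kn->attr.open,
			lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
}

/* caller owns an open file that is still on the open node's ->files list */
static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
{
	return rcu_dereference_protected(of->kn->attr.open,
					 !list_empty(&of->list));
}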
102 static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn) in kernfs_ops() argument
104 if (kn->flags & KERNFS_LOCKDEP) in kernfs_ops()
105 lockdep_assert_held(kn); in kernfs_ops()
106 return kn->attr.ops; in kernfs_ops()
134 const struct kernfs_ops *ops = kernfs_ops(of->kn); in kernfs_seq_stop_active()
138 kernfs_put_active(of->kn); in kernfs_seq_stop_active()
151 if (!kernfs_get_active(of->kn)) in kernfs_seq_start()
154 ops = kernfs_ops(of->kn); in kernfs_seq_start()
168 const struct kernfs_ops *ops = kernfs_ops(of->kn); in kernfs_seq_next()
201 return of->kn->attr.ops->seq_show(sf, v); in kernfs_seq_show()
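The seq_file hooks (lines 134-201) bracket every call into the backing kernfs_ops with kernfs_get_active()/kernfs_put_active(), so the node cannot be deactivated while ->seq_show() runs; kernfs_ops() itself (lines 102-106) asserts that an active reference is held when lockdep is enabled. A condensed sketch of the start/stop bracketing, assuming ops->seq_start exists (the real file also emulates seq_file for plain ->show ops):

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);	/* ->stop() still runs and unlocks */

	ops = kernfs_ops(of->kn);
	return ops->seq_start(sf, ppos);
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV)) {
		const struct kernfs_ops *ops = kernfs_ops(of->kn);

		if (ops->seq_stop)
			ops->seq_stop(sf, v);
		kernfs_put_active(of->kn);	/* drop the pin taken in start */
	}
	mutex_unlock(&of->mutex);
}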
237 if (!kernfs_get_active(of->kn)) { in kernfs_file_read_iter()
245 ops = kernfs_ops(of->kn); in kernfs_file_read_iter()
251 kernfs_put_active(of->kn); in kernfs_file_read_iter()
274 if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW) in kernfs_fop_read_iter()
322 if (!kernfs_get_active(of->kn)) { in kernfs_fop_write_iter()
328 ops = kernfs_ops(of->kn); in kernfs_fop_write_iter()
334 kernfs_put_active(of->kn); in kernfs_fop_write_iter()
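The plain read and write paths (lines 237-334) follow the same discipline: take the active reference, look up the ops, call the handler, drop the reference. A trimmed sketch of the write side; buffer preallocation, length clamping, and ki_pos advancement from the real function are omitted here:

static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
	ssize_t len = iov_iter_count(iter);
	const struct kernfs_ops *ops;
	char *buf;

	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_iter(buf, len, iter) != len) {
		kfree(buf);
		return -EFAULT;
	}
	buf[len] = '\0';

	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {	/* node already deactivated */
		len = -ENODEV;
		goto out_unlock;
	}

	ops = kernfs_ops(of->kn);
	len = ops->write ? ops->write(of, buf, len, iocb->ki_pos) : -EINVAL;

	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);
	kfree(buf);
	return len;
}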
356 if (!kernfs_get_active(of->kn)) in kernfs_vma_open()
362 kernfs_put_active(of->kn); in kernfs_vma_open()
374 if (!kernfs_get_active(of->kn)) in kernfs_vma_fault()
381 kernfs_put_active(of->kn); in kernfs_vma_fault()
394 if (!kernfs_get_active(of->kn)) in kernfs_vma_page_mkwrite()
403 kernfs_put_active(of->kn); in kernfs_vma_page_mkwrite()
417 if (!kernfs_get_active(of->kn)) in kernfs_vma_access()
424 kernfs_put_active(of->kn); in kernfs_vma_access()
439 if (!kernfs_get_active(of->kn)) in kernfs_vma_set_policy()
446 kernfs_put_active(of->kn); in kernfs_vma_set_policy()
460 if (!kernfs_get_active(of->kn)) in kernfs_vma_get_policy()
467 kernfs_put_active(of->kn); in kernfs_vma_get_policy()
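Lines 356-467 are kernfs' vm_operations wrappers: each one pins the node with kernfs_get_active(), forwards to the vm_ops the implementer installed at mmap time (of->vm_ops), and unpins again, failing or falling back when the node is gone. A representative sketch of the fault wrapper, reconstructed from the pattern above:

static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	vm_fault_t ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))		/* node being removed */
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vmf);

	kernfs_put_active(of->kn);
	return ret;
}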
497 if (!(of->kn->flags & KERNFS_HAS_MMAP)) in kernfs_fop_mmap()
503 if (!kernfs_get_active(of->kn)) in kernfs_fop_mmap()
506 ops = kernfs_ops(of->kn); in kernfs_fop_mmap()
536 kernfs_put_active(of->kn); in kernfs_fop_mmap()
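kernfs_fop_mmap() (lines 497-536) only proceeds when the node was created with an ->mmap op (KERNFS_HAS_MMAP), calls it under an active reference, then interposes the wrapper vm_operations shown above. A trimmed sketch; the real function performs additional sanity checks on the vma that are left out here:

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/* remember the implementer's vm_ops and install kernfs' wrappers */
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);
	return rc;
}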
557 static int kernfs_get_open_node(struct kernfs_node *kn, in kernfs_get_open_node() argument
563 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_get_open_node()
564 on = kernfs_deref_open_node_locked(kn); in kernfs_get_open_node()
576 rcu_assign_pointer(kn->attr.open, on); in kernfs_get_open_node()
580 if (kn->flags & KERNFS_HAS_RELEASE) in kernfs_get_open_node()
601 static void kernfs_unlink_open_file(struct kernfs_node *kn, in kernfs_unlink_open_file() argument
608 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_unlink_open_file()
610 on = kernfs_deref_open_node_locked(kn); in kernfs_unlink_open_file()
617 if (kn->flags & KERNFS_HAS_RELEASE) { in kernfs_unlink_open_file()
628 rcu_assign_pointer(kn->attr.open, NULL); in kernfs_unlink_open_file()
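kernfs_get_open_node() and kernfs_unlink_open_file() (lines 557-628) manage the single kernfs_open_node hanging off kn->attr.open: the first opener allocates it and publishes it with rcu_assign_pointer(), later openers just join its file list, and the last closer clears the pointer again, all under the hashed mutex. A hedged sketch of the attach side; field initialization is an approximation of the real structure:

static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on;
	struct mutex *mutex;

	mutex = kernfs_open_file_mutex_lock(kn);
	on = kernfs_deref_open_node_locked(kn);

	if (!on) {
		/* first opener: allocate and publish the shared open node */
		on = kzalloc(sizeof(*on), GFP_KERNEL);
		if (!on) {
			mutex_unlock(mutex);
			return -ENOMEM;
		}
		atomic_set(&on->event, 1);
		init_waitqueue_head(&on->poll);
		INIT_LIST_HEAD(&on->files);
		rcu_assign_pointer(kn->attr.open, on);
	}

	list_add_tail(&of->list, &on->files);
	if (kn->flags & KERNFS_HAS_RELEASE)
		on->nr_to_release++;	/* this file will need ->release */

	mutex_unlock(mutex);
	return 0;
}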
637 struct kernfs_node *kn = inode->i_private; in kernfs_fop_open() local
638 struct kernfs_root *root = kernfs_root(kn); in kernfs_fop_open()
644 if (!kernfs_get_active(kn)) in kernfs_fop_open()
647 ops = kernfs_ops(kn); in kernfs_fop_open()
690 of->kn = kn; in kernfs_fop_open()
736 error = kernfs_get_open_node(kn, of); in kernfs_fop_open()
748 kernfs_put_active(kn); in kernfs_fop_open()
752 kernfs_unlink_open_file(kn, of, true); in kernfs_fop_open()
759 kernfs_put_active(kn); in kernfs_fop_open()
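kernfs_fop_open() (lines 637-759) has the same shape: pin the node, snapshot the ops, build the kernfs_open_file, register it on the shared open node, and unwind each step in reverse on failure. A heavily trimmed sketch of that skeleton; the seq_file setup, preallocated buffers, and permission checks in the real function are omitted:

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = inode->i_private;
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	int error;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	of = kzalloc(sizeof(*of), GFP_KERNEL);
	if (!of) {
		error = -ENOMEM;
		goto err_put_active;
	}

	mutex_init(&of->mutex);
	of->kn = kn;
	of->file = file;

	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_free;

	if (ops->open) {
		error = ops->open(of);
		if (error)
			goto err_unlink;
	}

	kernfs_put_active(kn);
	return 0;

err_unlink:
	kernfs_unlink_open_file(kn, of, true);
err_free:
	kfree(of);
err_put_active:
	kernfs_put_active(kn);
	return error;
}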
764 static void kernfs_release_file(struct kernfs_node *kn, in kernfs_release_file() argument
774 lockdep_assert_held(kernfs_open_file_mutex_ptr(kn)); in kernfs_release_file()
782 kn->attr.ops->release(of); in kernfs_release_file()
790 struct kernfs_node *kn = inode->i_private; in kernfs_fop_release() local
793 if (kn->flags & KERNFS_HAS_RELEASE) { in kernfs_fop_release()
796 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_fop_release()
797 kernfs_release_file(kn, of); in kernfs_fop_release()
801 kernfs_unlink_open_file(kn, of, false); in kernfs_fop_release()
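kernfs_release_file() must run under the node's hashed open-file mutex (the lockdep assertion on line 774) and calls ->release at most once per open file; kernfs_fop_release() only bothers taking that mutex when the node was created with a ->release op. A short sketch of both, with the seq_file teardown simplified to a kfree():

static void kernfs_release_file(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));

	if (!of->released) {
		kn->attr.ops->release(of);
		of->released = true;
		of_on(of)->nr_to_release--;
	}
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = inode->i_private;
	struct kernfs_open_file *of = kernfs_of(filp);

	if (kn->flags & KERNFS_HAS_RELEASE) {
		struct mutex *mutex;

		mutex = kernfs_open_file_mutex_lock(kn);
		kernfs_release_file(kn, of);
		mutex_unlock(mutex);
	}

	kernfs_unlink_open_file(kn, of, false);
	kfree(of);		/* simplified: real code also tears down seq_file state */
	return 0;
}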
809 bool kernfs_should_drain_open_files(struct kernfs_node *kn) in kernfs_should_drain_open_files() argument
818 WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS); in kernfs_should_drain_open_files()
821 on = rcu_dereference(kn->attr.open); in kernfs_should_drain_open_files()
828 void kernfs_drain_open_files(struct kernfs_node *kn) in kernfs_drain_open_files() argument
834 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_drain_open_files()
835 on = kernfs_deref_open_node_locked(kn); in kernfs_drain_open_files()
850 if (kn->flags & KERNFS_HAS_RELEASE) in kernfs_drain_open_files()
851 kernfs_release_file(kn, of); in kernfs_drain_open_files()
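The drain pair (lines 809-851) runs after a node has been deactivated: the WARN_ON_ONCE checks that kn->active already sits at KN_DEACTIVATED_BIAS, the RCU read reports whether any open files still need work, and the drain walks the open node's file list, unmapping mmapped files and forcing ->release so no opener can reach the node's data afterwards. A hedged sketch of the walk:

void kernfs_drain_open_files(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;
	struct mutex *mutex;

	mutex = kernfs_open_file_mutex_lock(kn);
	on = kernfs_deref_open_node_locked(kn);
	if (!on) {
		mutex_unlock(mutex);
		return;
	}

	list_for_each_entry(of, &on->files, list) {
		/* zap live mappings so page faults can't revive the node */
		if ((kn->flags & KERNFS_HAS_MMAP) && of->mmapped)
			unmap_mapping_range(file_inode(of->file)->i_mapping,
					    0, 0, 1);

		if (kn->flags & KERNFS_HAS_RELEASE)
			kernfs_release_file(kn, of);
	}

	mutex_unlock(mutex);
}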
887 struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry); in kernfs_fop_poll() local
890 if (!kernfs_get_active(kn)) in kernfs_fop_poll()
893 if (kn->attr.ops->poll) in kernfs_fop_poll()
894 ret = kn->attr.ops->poll(of, wait); in kernfs_fop_poll()
898 kernfs_put_active(kn); in kernfs_fop_poll()
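kernfs_fop_poll() (lines 887-898) again pins the node and, if the ops provide a ->poll hook, delegates to it, otherwise falling back to kernfs' generic poll. A sketch reconstructed from those fragments:

static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
	__poll_t ret;

	if (!kernfs_get_active(kn))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	if (kn->attr.ops->poll)
		ret = kn->attr.ops->poll(of, wait);
	else
		ret = kernfs_generic_poll(of, wait);

	kernfs_put_active(kn);
	return ret;
}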
904 struct kernfs_node *kn; in kernfs_notify_workfn() local
910 kn = kernfs_notify_list; in kernfs_notify_workfn()
911 if (kn == KERNFS_NOTIFY_EOL) { in kernfs_notify_workfn()
915 kernfs_notify_list = kn->attr.notify_next; in kernfs_notify_workfn()
916 kn->attr.notify_next = NULL; in kernfs_notify_workfn()
919 root = kernfs_root(kn); in kernfs_notify_workfn()
923 list_for_each_entry(info, &kernfs_root(kn)->supers, node) { in kernfs_notify_workfn()
935 inode = ilookup(info->sb, kernfs_ino(kn)); in kernfs_notify_workfn()
939 name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name)); in kernfs_notify_workfn()
940 parent = kernfs_get_parent(kn); in kernfs_notify_workfn()
960 kernfs_put(kn); in kernfs_notify_workfn()
971 void kernfs_notify(struct kernfs_node *kn) in kernfs_notify() argument
977 if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) in kernfs_notify()
982 on = rcu_dereference(kn->attr.open); in kernfs_notify()
991 if (!kn->attr.notify_next) { in kernfs_notify()
992 kernfs_get(kn); in kernfs_notify()
993 kn->attr.notify_next = kernfs_notify_list; in kernfs_notify()
994 kernfs_notify_list = kn; in kernfs_notify()
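kernfs_notify() (lines 971-994) wakes pollers immediately under RCU and then, for the fsnotify side, queues the node on an intrusive singly linked list, kernfs_notify_list, terminated by the KERNFS_NOTIFY_EOL sentinel; attr.notify_next doubles as the "already queued" flag, and kernfs_get() keeps the node alive while it waits. The workqueue function (lines 904-960) pops one node at a time and does the fsnotify in process context. A sketch of the dequeue loop; the lock name and the fsnotify body are assumptions based on the fragments above:

static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;

repeat:
	/* pop one queued node off the notify list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;				/* list drained */
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	spin_unlock_irq(&kernfs_notify_lock);

	/* ... look up the inode on each mounted super and fsnotify it ... */

	kernfs_put(kn);				/* reference taken when queued */
	goto repeat;
}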
1037 struct kernfs_node *kn; in __kernfs_create_file() local
1043 kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, in __kernfs_create_file()
1045 if (!kn) in __kernfs_create_file()
1048 kn->attr.ops = ops; in __kernfs_create_file()
1049 kn->attr.size = size; in __kernfs_create_file()
1050 kn->ns = ns; in __kernfs_create_file()
1051 kn->priv = priv; in __kernfs_create_file()
1055 lockdep_init_map(&kn->dep_map, "kn->active", key, 0); in __kernfs_create_file()
1056 kn->flags |= KERNFS_LOCKDEP; in __kernfs_create_file()
1066 kn->flags |= KERNFS_HAS_SEQ_SHOW; in __kernfs_create_file()
1068 kn->flags |= KERNFS_HAS_MMAP; in __kernfs_create_file()
1070 kn->flags |= KERNFS_HAS_RELEASE; in __kernfs_create_file()
1072 rc = kernfs_add_one(kn); in __kernfs_create_file()
1074 kernfs_put(kn); in __kernfs_create_file()
1077 return kn; in __kernfs_create_file()
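__kernfs_create_file() (lines 1037-1077) is where the per-node flags tested throughout the paths above are derived from the ops the caller passes in, and where the lockdep map behind the KERNFS_LOCKDEP checks is seeded. A trimmed sketch assembled from the fragments; argument order and error handling outside the listed lines are approximations:

struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name, umode_t mode,
					 kuid_t uid, kgid_t gid, loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	int rc;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG,
			     uid, gid, KERNFS_FILE);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/* cache which optional hooks exist so hot paths only test flags */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;
	if (ops->release)
		kn->flags |= KERNFS_HAS_RELEASE;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}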