Lines Matching refs:kn (all hits below are from fs/kernfs/file.c)

43 static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn) in kernfs_open_file_mutex_ptr() argument
45 int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS); in kernfs_open_file_mutex_ptr()
50 static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn) in kernfs_open_file_mutex_lock() argument
54 lock = kernfs_open_file_mutex_ptr(kn); in kernfs_open_file_mutex_lock()
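Lines 43-54 show the hashed lock pool behind the open-file mutex: instead of one global mutex, kernfs_open_file_mutex_ptr() hashes the kernfs_node pointer with hash_ptr() to pick a mutex out of a fixed array, and kernfs_open_file_mutex_lock() takes that mutex and hands it back to the caller for later unlocking. A minimal user-space sketch of the same pattern, with hypothetical names (NR_LOCK_BITS, lock_pool, lock_for) and POSIX threads standing in for kernel mutexes:

	#include <pthread.h>
	#include <stdint.h>

	#define NR_LOCK_BITS	4
	#define NR_LOCKS	(1 << NR_LOCK_BITS)

	/* Fixed pool of mutexes; contention is spread across the pool. */
	static pthread_mutex_t lock_pool[NR_LOCKS] = {
		[0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Pick a mutex by hashing the object pointer, mirroring
	 * kernfs_open_file_mutex_ptr()'s use of hash_ptr(). */
	static pthread_mutex_t *lock_for(const void *obj)
	{
		uintptr_t v = (uintptr_t)obj;

		v ^= v >> 16;	/* cheap pointer mix; the kernel uses hash_ptr() */
		return &lock_pool[v & (NR_LOCKS - 1)];
	}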
69 return rcu_dereference_protected(of->kn->attr.open, in of_on()
91 kernfs_deref_open_node_locked(struct kernfs_node *kn) in kernfs_deref_open_node_locked() argument
93 return rcu_dereference_protected(kn->attr.open, in kernfs_deref_open_node_locked()
94 lockdep_is_held(kernfs_open_file_mutex_ptr(kn))); in kernfs_deref_open_node_locked()
106 static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn) in kernfs_ops() argument
108 if (kn->flags & KERNFS_LOCKDEP) in kernfs_ops()
109 lockdep_assert_held(kn); in kernfs_ops()
110 return kn->attr.ops; in kernfs_ops()
138 const struct kernfs_ops *ops = kernfs_ops(of->kn); in kernfs_seq_stop_active()
142 kernfs_put_active(of->kn); in kernfs_seq_stop_active()
155 if (!kernfs_get_active(of->kn)) in kernfs_seq_start()
158 ops = kernfs_ops(of->kn); in kernfs_seq_start()
172 const struct kernfs_ops *ops = kernfs_ops(of->kn); in kernfs_seq_next()
205 return of->kn->attr.ops->seq_show(sf, v); in kernfs_seq_show()
241 if (!kernfs_get_active(of->kn)) { in kernfs_file_read_iter()
249 ops = kernfs_ops(of->kn); in kernfs_file_read_iter()
255 kernfs_put_active(of->kn); in kernfs_file_read_iter()
278 if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW) in kernfs_fop_read_iter()
326 if (!kernfs_get_active(of->kn)) { in kernfs_fop_write_iter()
332 ops = kernfs_ops(of->kn); in kernfs_fop_write_iter()
338 kernfs_put_active(of->kn); in kernfs_fop_write_iter()
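The read and write paths at lines 241-255 and 326-338 follow the bracketing discipline used by every entry point in this file: pin the node with kernfs_get_active() first, dereference kn->attr.ops through kernfs_ops() only while that active reference is held, and drop it with kernfs_put_active() before returning. A hedged reconstruction of the shape of that bracket (not the exact kernel body; of->mutex handling and the user-space copy are omitted, and the ->read signature is abbreviated from include/linux/kernfs.h):

	static ssize_t rw_bracket_sketch(struct kernfs_open_file *of,
					 char *buf, size_t count, loff_t off)
	{
		const struct kernfs_ops *ops;
		ssize_t len;

		if (!kernfs_get_active(of->kn))		/* node may be going away */
			return -ENODEV;

		ops = kernfs_ops(of->kn);		/* valid only with active ref */
		len = ops->read ? ops->read(of, buf, count, off) : -EINVAL;

		kernfs_put_active(of->kn);		/* drop ref before returning */
		return len;
	}

The same get/put pairs bracket the vm_operations callbacks at lines 360-471, the mmap path at lines 501-540, and the poll path at lines 891-902.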
360 if (!kernfs_get_active(of->kn)) in kernfs_vma_open()
366 kernfs_put_active(of->kn); in kernfs_vma_open()
378 if (!kernfs_get_active(of->kn)) in kernfs_vma_fault()
385 kernfs_put_active(of->kn); in kernfs_vma_fault()
398 if (!kernfs_get_active(of->kn)) in kernfs_vma_page_mkwrite()
407 kernfs_put_active(of->kn); in kernfs_vma_page_mkwrite()
421 if (!kernfs_get_active(of->kn)) in kernfs_vma_access()
428 kernfs_put_active(of->kn); in kernfs_vma_access()
443 if (!kernfs_get_active(of->kn)) in kernfs_vma_set_policy()
450 kernfs_put_active(of->kn); in kernfs_vma_set_policy()
464 if (!kernfs_get_active(of->kn)) in kernfs_vma_get_policy()
471 kernfs_put_active(of->kn); in kernfs_vma_get_policy()
501 if (!(of->kn->flags & KERNFS_HAS_MMAP)) in kernfs_fop_mmap()
507 if (!kernfs_get_active(of->kn)) in kernfs_fop_mmap()
510 ops = kernfs_ops(of->kn); in kernfs_fop_mmap()
540 kernfs_put_active(of->kn); in kernfs_fop_mmap()
561 static int kernfs_get_open_node(struct kernfs_node *kn, in kernfs_get_open_node() argument
567 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_get_open_node()
568 on = kernfs_deref_open_node_locked(kn); in kernfs_get_open_node()
580 rcu_assign_pointer(kn->attr.open, on); in kernfs_get_open_node()
584 if (kn->flags & KERNFS_HAS_RELEASE) in kernfs_get_open_node()
605 static void kernfs_unlink_open_file(struct kernfs_node *kn, in kernfs_unlink_open_file() argument
612 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_unlink_open_file()
614 on = kernfs_deref_open_node_locked(kn); in kernfs_unlink_open_file()
621 if (kn->flags & KERNFS_HAS_RELEASE) { in kernfs_unlink_open_file()
632 rcu_assign_pointer(kn->attr.open, NULL); in kernfs_unlink_open_file()
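kernfs_get_open_node() and kernfs_unlink_open_file() (lines 561-632) treat kn->attr.open as an RCU-managed pointer: it is published and cleared with rcu_assign_pointer() while the hashed open-file mutex is held, holders of that mutex dereference it with rcu_dereference_protected() plus a lockdep_is_held() check (lines 91-94), and lock-free readers such as kernfs_should_drain_open_files() and kernfs_notify() use plain rcu_dereference() (lines 825 and 986). A hedged sketch of the clear side, reconstructed from the lines above ("empty" stands in for the real check that the last open file has gone away; the allocation done on the publish side is not shown):

	/* The pointer is only ever written with the hashed mutex held;
	 * RCU readers that raced with us keep seeing the old node until
	 * a grace period elapses, after which the real code frees it. */
	static void clear_open_node_sketch(struct kernfs_node *kn, bool empty)
	{
		struct mutex *mutex;
		struct kernfs_open_node *on;

		mutex = kernfs_open_file_mutex_lock(kn);
		on = kernfs_deref_open_node_locked(kn);	/* mutex held, no rcu_read_lock() needed */
		if (on && empty)
			rcu_assign_pointer(kn->attr.open, NULL);
		mutex_unlock(mutex);
	}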
641 struct kernfs_node *kn = inode->i_private; in kernfs_fop_open() local
642 struct kernfs_root *root = kernfs_root(kn); in kernfs_fop_open()
648 if (!kernfs_get_active(kn)) in kernfs_fop_open()
651 ops = kernfs_ops(kn); in kernfs_fop_open()
694 of->kn = kn; in kernfs_fop_open()
740 error = kernfs_get_open_node(kn, of); in kernfs_fop_open()
752 kernfs_put_active(kn); in kernfs_fop_open()
756 kernfs_unlink_open_file(kn, of, true); in kernfs_fop_open()
763 kernfs_put_active(kn); in kernfs_fop_open()
768 static void kernfs_release_file(struct kernfs_node *kn, in kernfs_release_file() argument
778 lockdep_assert_held(kernfs_open_file_mutex_ptr(kn)); in kernfs_release_file()
786 kn->attr.ops->release(of); in kernfs_release_file()
794 struct kernfs_node *kn = inode->i_private; in kernfs_fop_release() local
797 if (kn->flags & KERNFS_HAS_RELEASE) { in kernfs_fop_release()
800 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_fop_release()
801 kernfs_release_file(kn, of); in kernfs_fop_release()
805 kernfs_unlink_open_file(kn, of, false); in kernfs_fop_release()
813 bool kernfs_should_drain_open_files(struct kernfs_node *kn) in kernfs_should_drain_open_files() argument
822 WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS); in kernfs_should_drain_open_files()
825 on = rcu_dereference(kn->attr.open); in kernfs_should_drain_open_files()
832 void kernfs_drain_open_files(struct kernfs_node *kn) in kernfs_drain_open_files() argument
838 mutex = kernfs_open_file_mutex_lock(kn); in kernfs_drain_open_files()
839 on = kernfs_deref_open_node_locked(kn); in kernfs_drain_open_files()
854 if (kn->flags & KERNFS_HAS_RELEASE) in kernfs_drain_open_files()
855 kernfs_release_file(kn, of); in kernfs_drain_open_files()
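kernfs_drain_open_files() (lines 832-855) is the teardown path run once a node has been deactivated (see the KN_DEACTIVATED_BIAS check at line 822): with the hashed mutex held it walks every kernfs_open_file still attached to the node and, for KERNFS_HAS_RELEASE nodes, invokes kernfs_release_file(), which in turn calls kn->attr.ops->release() (line 786). A rough sketch of that walk; the mmap teardown is omitted and the list field names are taken from memory rather than from the listing:

	mutex = kernfs_open_file_mutex_lock(kn);
	on = kernfs_deref_open_node_locked(kn);
	if (on) {
		list_for_each_entry(of, &on->files, list) {
			/* mmapped areas are zapped here for KERNFS_HAS_MMAP nodes */
			if (kn->flags & KERNFS_HAS_RELEASE)
				kernfs_release_file(kn, of);	/* see line 855 */
		}
	}
	mutex_unlock(mutex);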
891 struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry); in kernfs_fop_poll() local
894 if (!kernfs_get_active(kn)) in kernfs_fop_poll()
897 if (kn->attr.ops->poll) in kernfs_fop_poll()
898 ret = kn->attr.ops->poll(of, wait); in kernfs_fop_poll()
902 kernfs_put_active(kn); in kernfs_fop_poll()
908 struct kernfs_node *kn; in kernfs_notify_workfn() local
914 kn = kernfs_notify_list; in kernfs_notify_workfn()
915 if (kn == KERNFS_NOTIFY_EOL) { in kernfs_notify_workfn()
919 kernfs_notify_list = kn->attr.notify_next; in kernfs_notify_workfn()
920 kn->attr.notify_next = NULL; in kernfs_notify_workfn()
923 root = kernfs_root(kn); in kernfs_notify_workfn()
927 list_for_each_entry(info, &kernfs_root(kn)->supers, node) { in kernfs_notify_workfn()
939 inode = ilookup(info->sb, kernfs_ino(kn)); in kernfs_notify_workfn()
943 name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name)); in kernfs_notify_workfn()
944 parent = kernfs_get_parent(kn); in kernfs_notify_workfn()
964 kernfs_put(kn); in kernfs_notify_workfn()
975 void kernfs_notify(struct kernfs_node *kn) in kernfs_notify() argument
981 if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) in kernfs_notify()
986 on = rcu_dereference(kn->attr.open); in kernfs_notify()
995 if (!kn->attr.notify_next) { in kernfs_notify()
996 kernfs_get(kn); in kernfs_notify()
997 kn->attr.notify_next = kernfs_notify_list; in kernfs_notify()
998 kernfs_notify_list = kn; in kernfs_notify()
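kernfs_notify() (lines 975-998) queues a node for the notification work item on a singly linked list threaded through kn->attr.notify_next. KERNFS_NOTIFY_EOL acts as an end-of-list sentinel so that a NULL notify_next can mean "not queued" (line 995) even for the last element, and kernfs_notify_workfn() (lines 908-920) pops entries until it reads the sentinel. A small user-space analogue of that sentinel trick, with hypothetical names (struct node, pending, EOL); the locking the kernel does around these list operations is omitted:

	#include <stddef.h>

	struct node {
		struct node *notify_next;	/* NULL: not currently queued */
	};

	/* Sentinel distinct from NULL, so even the tail of the queue has a
	 * non-NULL notify_next and the "already queued?" test stays a plain
	 * NULL check (compare KERNFS_NOTIFY_EOL at lines 915 and 995). */
	#define EOL	((struct node *)1)

	static struct node *pending = EOL;

	static void queue_notify(struct node *n)
	{
		if (!n->notify_next) {		/* not queued yet */
			n->notify_next = pending;
			pending = n;
		}
	}

	static struct node *pop_notify(void)
	{
		struct node *n = pending;

		if (n == EOL)			/* queue drained */
			return NULL;
		pending = n->notify_next;
		n->notify_next = NULL;		/* eligible to be queued again */
		return n;
	}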
1041 struct kernfs_node *kn; in __kernfs_create_file() local
1047 kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, in __kernfs_create_file()
1049 if (!kn) in __kernfs_create_file()
1052 kn->attr.ops = ops; in __kernfs_create_file()
1053 kn->attr.size = size; in __kernfs_create_file()
1054 kn->ns = ns; in __kernfs_create_file()
1055 kn->priv = priv; in __kernfs_create_file()
1059 lockdep_init_map(&kn->dep_map, "kn->active", key, 0); in __kernfs_create_file()
1060 kn->flags |= KERNFS_LOCKDEP; in __kernfs_create_file()
1070 kn->flags |= KERNFS_HAS_SEQ_SHOW; in __kernfs_create_file()
1072 kn->flags |= KERNFS_HAS_MMAP; in __kernfs_create_file()
1074 kn->flags |= KERNFS_HAS_RELEASE; in __kernfs_create_file()
1076 rc = kernfs_add_one(kn); in __kernfs_create_file()
1078 kernfs_put(kn); in __kernfs_create_file()
1081 return kn; in __kernfs_create_file()
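__kernfs_create_file() (lines 1041-1081) fills in kn->attr.ops, kn->attr.size, kn->ns and kn->priv, wires up lockdep for the active reference (lines 1059-1060), and caches capability bits in kn->flags; in the kernel source KERNFS_HAS_SEQ_SHOW, KERNFS_HAS_MMAP and KERNFS_HAS_RELEASE are set when the supplied kernfs_ops provides seq_show, mmap and release callbacks, so the fast paths above can test a flag instead of chasing the ops pointer. A hedged example of a kernfs_ops a caller might hand in (the example_* names are hypothetical; only the seq_show and release callbacks are shown):

	/* A file backed by these ops would get both KERNFS_HAS_SEQ_SHOW and
	 * KERNFS_HAS_RELEASE set by __kernfs_create_file(). */
	static int example_seq_show(struct seq_file *sf, void *v)
	{
		seq_puts(sf, "hello\n");
		return 0;
	}

	static void example_release(struct kernfs_open_file *of)
	{
		/* per-open-file cleanup, invoked via kernfs_release_file() */
	}

	static const struct kernfs_ops example_ops = {
		.seq_show = example_seq_show,
		.release  = example_release,
	};

The ops pointer ends up in kn->attr.ops at line 1052 and is what kernfs_ops() returns to the read, write, mmap and poll paths above.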