Lines matching refs:mnt — cross-reference hits for the identifier mnt (apparently fs/namespace.c). Each entry gives the source line number, the matching line, and the enclosing function, with "argument" or "local" marking where the identifier is declared.

90 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) in m_hash() argument
92 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); in m_hash()
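
The m_hash() hits above (lines 90-92) hash the (parent vfsmount, mountpoint dentry) pair into the mount hash table. Below is a minimal, runnable userspace sketch of the same pointer-pair hashing shape; L1_CACHE_BYTES, HASH_SHIFT and the table size are made-up values here (the kernel derives its shift and mask at boot).

#include <stdio.h>

#define L1_CACHE_BYTES	64	/* assumed cache-line size */
#define HASH_SHIFT	8	/* hypothetical; the kernel computes shift/mask at boot */
#define HASH_MASK	((1UL << HASH_SHIFT) - 1)

/* Hash a (parent mount, mountpoint dentry) pointer pair into a table slot,
 * mirroring the shape of m_hash(): strip cache-line alignment, sum the two
 * pointers, fold the high bits back in, then mask. */
static unsigned long m_hash_slot(const void *mnt, const void *dentry)
{
	unsigned long tmp = (unsigned long)mnt / L1_CACHE_BYTES;

	tmp += (unsigned long)dentry / L1_CACHE_BYTES;
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & HASH_MASK;
}

int main(void)
{
	int a, b;	/* stand-ins for a vfsmount and a dentry */

	printf("slot = %lu\n", m_hash_slot(&a, &b));
	return 0;
}
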
105 static int mnt_alloc_id(struct mount *mnt) in mnt_alloc_id() argument
111 mnt->mnt_id = res; in mnt_alloc_id()
115 static void mnt_free_id(struct mount *mnt) in mnt_free_id() argument
117 ida_free(&mnt_id_ida, mnt->mnt_id); in mnt_free_id()
123 static int mnt_alloc_group_id(struct mount *mnt) in mnt_alloc_group_id() argument
129 mnt->mnt_group_id = res; in mnt_alloc_group_id()
136 void mnt_release_group_id(struct mount *mnt) in mnt_release_group_id() argument
138 ida_free(&mnt_group_ida, mnt->mnt_group_id); in mnt_release_group_id()
139 mnt->mnt_group_id = 0; in mnt_release_group_id()
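
mnt_alloc_id()/mnt_free_id() and the peer-group variants above (lines 105-139) hand out per-mount identifiers from IDA allocators. A kernel-style sketch of that allocate/store/free pattern follows; it assumes kernel headers and hypothetical names (my_ida, my_obj), so it is a sketch rather than standalone code.

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(my_ida);

struct my_obj {
	int id;
};

static int my_obj_alloc_id(struct my_obj *obj)
{
	int res = ida_alloc(&my_ida, GFP_KERNEL);	/* smallest free non-negative id */

	if (res < 0)
		return res;			/* negative errno, e.g. -ENOMEM */
	obj->id = res;
	return 0;
}

static void my_obj_free_id(struct my_obj *obj)
{
	ida_free(&my_ida, obj->id);		/* return the id for reuse */
}
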
145 static inline void mnt_add_count(struct mount *mnt, int n) in mnt_add_count() argument
148 this_cpu_add(mnt->mnt_pcp->mnt_count, n); in mnt_add_count()
151 mnt->mnt_count += n; in mnt_add_count()
159 unsigned int mnt_get_count(struct mount *mnt) in mnt_get_count() argument
166 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; in mnt_get_count()
171 return mnt->mnt_count; in mnt_get_count()
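
mnt_add_count()/mnt_get_count() (lines 145-171) keep the mount reference count in per-CPU counters on SMP and sum every CPU's slot when the exact value is needed. A simplified, runnable userspace illustration of that "increment locally, sum on read" idea, using a plain array indexed by a pretend CPU number in place of this_cpu_add()/per_cpu_ptr():

#include <stdio.h>

#define NR_CPUS 4	/* illustrative */

/* One counter slot per CPU: writers touch only their own slot (cheap and
 * contention-free), readers sum every slot to get the true total (rare). */
static long pcp_count[NR_CPUS];

static void count_add(int cpu, int n)
{
	pcp_count[cpu] += n;		/* kernel: this_cpu_add(...->mnt_count, n) */
}

static long count_get(void)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += pcp_count[cpu];	/* kernel: per_cpu_ptr(...)->mnt_count */
	return sum;
}

int main(void)
{
	count_add(0, 1);			/* reference taken on CPU 0 */
	count_add(2, 1);			/* another reference, CPU 2 */
	count_add(1, -1);			/* a reference dropped on CPU 1 */
	printf("count = %ld\n", count_get());	/* prints 1 */
	return 0;
}
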
177 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); in alloc_vfsmnt() local
178 if (mnt) { in alloc_vfsmnt()
181 err = mnt_alloc_id(mnt); in alloc_vfsmnt()
186 mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL); in alloc_vfsmnt()
187 if (!mnt->mnt_devname) in alloc_vfsmnt()
192 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); in alloc_vfsmnt()
193 if (!mnt->mnt_pcp) in alloc_vfsmnt()
196 this_cpu_add(mnt->mnt_pcp->mnt_count, 1); in alloc_vfsmnt()
198 mnt->mnt_count = 1; in alloc_vfsmnt()
199 mnt->mnt_writers = 0; in alloc_vfsmnt()
202 INIT_HLIST_NODE(&mnt->mnt_hash); in alloc_vfsmnt()
203 INIT_LIST_HEAD(&mnt->mnt_child); in alloc_vfsmnt()
204 INIT_LIST_HEAD(&mnt->mnt_mounts); in alloc_vfsmnt()
205 INIT_LIST_HEAD(&mnt->mnt_list); in alloc_vfsmnt()
206 INIT_LIST_HEAD(&mnt->mnt_expire); in alloc_vfsmnt()
207 INIT_LIST_HEAD(&mnt->mnt_share); in alloc_vfsmnt()
208 INIT_LIST_HEAD(&mnt->mnt_slave_list); in alloc_vfsmnt()
209 INIT_LIST_HEAD(&mnt->mnt_slave); in alloc_vfsmnt()
210 INIT_HLIST_NODE(&mnt->mnt_mp_list); in alloc_vfsmnt()
211 INIT_LIST_HEAD(&mnt->mnt_umounting); in alloc_vfsmnt()
212 INIT_HLIST_HEAD(&mnt->mnt_stuck_children); in alloc_vfsmnt()
214 return mnt; in alloc_vfsmnt()
218 kfree_const(mnt->mnt_devname); in alloc_vfsmnt()
221 mnt_free_id(mnt); in alloc_vfsmnt()
223 kmem_cache_free(mnt_cache, mnt); in alloc_vfsmnt()
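
alloc_vfsmnt() (lines 177-223) builds a struct mount in several allocation steps and, on failure, unwinds only the steps that already succeeded through its out_free_* labels. A compact userspace sketch of that goto-unwind shape; the struct and step names are hypothetical stand-ins for the real allocations noted in the comments.

#include <stdlib.h>
#include <string.h>

struct obj {
	int id;
	char *name;
	long *counters;
};

static int next_id;	/* toy id source; alloc_vfsmnt() uses the mnt_id IDA */

/* Allocate an object in several steps; on failure, unwind only what has
 * already succeeded, mirroring alloc_vfsmnt()'s out_free_* labels. */
static struct obj *obj_alloc(const char *name)
{
	struct obj *o = calloc(1, sizeof(*o));		/* kmem_cache_zalloc() */

	if (!o)
		return NULL;

	o->id = next_id++;				/* mnt_alloc_id() */

	o->name = strdup(name);				/* kstrdup_const() */
	if (!o->name)
		goto out_free_id;

	o->counters = calloc(4, sizeof(*o->counters));	/* alloc_percpu() */
	if (!o->counters)
		goto out_free_name;

	return o;

out_free_name:
	free(o->name);					/* kfree_const() */
out_free_id:
	/* the id would be returned to its allocator here (mnt_free_id()) */
	free(o);					/* kmem_cache_free() */
	return NULL;
}

int main(void)
{
	struct obj *o = obj_alloc("example");

	if (o) {
		free(o->counters);
		free(o->name);
		free(o);
	}
	return 0;
}
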
246 bool __mnt_is_readonly(struct vfsmount *mnt) in __mnt_is_readonly() argument
248 return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb); in __mnt_is_readonly()
252 static inline void mnt_inc_writers(struct mount *mnt) in mnt_inc_writers() argument
255 this_cpu_inc(mnt->mnt_pcp->mnt_writers); in mnt_inc_writers()
257 mnt->mnt_writers++; in mnt_inc_writers()
261 static inline void mnt_dec_writers(struct mount *mnt) in mnt_dec_writers() argument
264 this_cpu_dec(mnt->mnt_pcp->mnt_writers); in mnt_dec_writers()
266 mnt->mnt_writers--; in mnt_dec_writers()
270 static unsigned int mnt_get_writers(struct mount *mnt) in mnt_get_writers() argument
277 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; in mnt_get_writers()
282 return mnt->mnt_writers; in mnt_get_writers()
286 static int mnt_is_readonly(struct vfsmount *mnt) in mnt_is_readonly() argument
288 if (mnt->mnt_sb->s_readonly_remount) in mnt_is_readonly()
292 return __mnt_is_readonly(mnt); in mnt_is_readonly()
313 struct mount *mnt = real_mount(m); in __mnt_want_write() local
317 mnt_inc_writers(mnt); in __mnt_want_write()
324 while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) in __mnt_want_write()
333 mnt_dec_writers(mnt); in __mnt_want_write()
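
__mnt_want_write() (lines 313-333) optimistically bumps the writer count, spins while MNT_WRITE_HOLD is set so that a concurrent remount-read-only can take a stable writer count, then backs the increment out if the mount did become read-only. A simplified, self-contained sketch of that ordering using C11 atomics in place of the kernel's per-CPU counters and memory barriers:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define MNT_READONLY	0x01
#define MNT_WRITE_HOLD	0x02

struct m {
	atomic_int flags;
	atomic_int writers;
};

/* Shape of __mnt_want_write(): announce the writer first, wait out a
 * concurrent remount-read-only (WRITE_HOLD), then re-check and back out
 * if the mount went read-only in the meantime. */
static int want_write(struct m *m)
{
	atomic_fetch_add(&m->writers, 1);		/* mnt_inc_writers() */

	while (atomic_load(&m->flags) & MNT_WRITE_HOLD)
		;					/* kernel: cpu_relax() */

	if (atomic_load(&m->flags) & MNT_READONLY) {
		atomic_fetch_sub(&m->writers, 1);	/* mnt_dec_writers() */
		return -EROFS;
	}
	return 0;
}

int main(void)
{
	struct m m = { 0 };

	printf("writable: %d\n", want_write(&m));	/* 0 */
	atomic_fetch_or(&m.flags, MNT_READONLY);
	printf("readonly: %d\n", want_write(&m));	/* -EROFS */
	return 0;
}
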
374 int mnt_clone_write(struct vfsmount *mnt) in mnt_clone_write() argument
377 if (__mnt_is_readonly(mnt)) in mnt_clone_write()
380 mnt_inc_writers(real_mount(mnt)); in mnt_clone_write()
396 return __mnt_want_write(file->f_path.mnt); in __mnt_want_write_file()
398 return mnt_clone_write(file->f_path.mnt); in __mnt_want_write_file()
428 void __mnt_drop_write(struct vfsmount *mnt) in __mnt_drop_write() argument
431 mnt_dec_writers(real_mount(mnt)); in __mnt_drop_write()
443 void mnt_drop_write(struct vfsmount *mnt) in mnt_drop_write() argument
445 __mnt_drop_write(mnt); in mnt_drop_write()
446 sb_end_write(mnt->mnt_sb); in mnt_drop_write()
452 __mnt_drop_write(file->f_path.mnt); in __mnt_drop_write_file()
462 static int mnt_make_readonly(struct mount *mnt) in mnt_make_readonly() argument
467 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; in mnt_make_readonly()
490 if (mnt_get_writers(mnt) > 0) in mnt_make_readonly()
493 mnt->mnt.mnt_flags |= MNT_READONLY; in mnt_make_readonly()
499 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; in mnt_make_readonly()
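
mnt_make_readonly() (lines 462-499) is the other half of that handshake: set MNT_WRITE_HOLD so new writers stall, look for writers that already got in, then either set MNT_READONLY or fail, and finally drop the hold. A matching simplified sketch, with the same caveats as the want_write() sketch above (the real code runs under the mount hash lock with explicit barriers and per-CPU sums):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define MNT_READONLY	0x01
#define MNT_WRITE_HOLD	0x02

struct m {
	atomic_int flags;
	atomic_int writers;
};

/* Shape of mnt_make_readonly(): raise WRITE_HOLD, check for writers that
 * slipped in first, flip READONLY or give up, then release the spinners. */
static int make_readonly(struct m *m)
{
	int err = 0;

	atomic_fetch_or(&m->flags, MNT_WRITE_HOLD);	/* block new writers */

	if (atomic_load(&m->writers) > 0)
		err = -EBUSY;				/* active writers: refuse */
	else
		atomic_fetch_or(&m->flags, MNT_READONLY);

	atomic_fetch_and(&m->flags, ~MNT_WRITE_HOLD);	/* let spinners proceed */
	return err;
}

int main(void)
{
	struct m m = { 0 };

	atomic_store(&m.writers, 1);			/* pretend a writer got in first */
	printf("busy: %d\n", make_readonly(&m));	/* -EBUSY, stays writable */
	atomic_store(&m.writers, 0);
	printf("idle: %d\n", make_readonly(&m));	/* 0, MNT_READONLY now set */
	return 0;
}
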
504 static int __mnt_unmake_readonly(struct mount *mnt) in __mnt_unmake_readonly() argument
507 mnt->mnt.mnt_flags &= ~MNT_READONLY; in __mnt_unmake_readonly()
514 struct mount *mnt; in sb_prepare_remount_readonly() local
522 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { in sb_prepare_remount_readonly()
523 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { in sb_prepare_remount_readonly()
524 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; in sb_prepare_remount_readonly()
526 if (mnt_get_writers(mnt) > 0) { in sb_prepare_remount_readonly()
539 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { in sb_prepare_remount_readonly()
540 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) in sb_prepare_remount_readonly()
541 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; in sb_prepare_remount_readonly()
548 static void free_vfsmnt(struct mount *mnt) in free_vfsmnt() argument
550 kfree_const(mnt->mnt_devname); in free_vfsmnt()
552 free_percpu(mnt->mnt_pcp); in free_vfsmnt()
554 kmem_cache_free(mnt_cache, mnt); in free_vfsmnt()
565 struct mount *mnt; in __legitimize_mnt() local
570 mnt = real_mount(bastard); in __legitimize_mnt()
571 mnt_add_count(mnt, 1); in __legitimize_mnt()
576 mnt_add_count(mnt, -1); in __legitimize_mnt()
581 mnt_add_count(mnt, -1); in __legitimize_mnt()
608 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) in __lookup_mnt() argument
610 struct hlist_head *head = m_hash(mnt, dentry); in __lookup_mnt()
614 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) in __lookup_mnt()
644 child_mnt = __lookup_mnt(path->mnt, path->dentry); in lookup_mnt()
645 m = child_mnt ? &child_mnt->mnt : NULL; in lookup_mnt()
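
__lookup_mnt()/lookup_mnt() (lines 608-645) find the mount attached at a given (parent mount, dentry) pair by scanning the hash chain selected by m_hash(). A small runnable sketch of that lookup over a toy table; the types and the slot() helper here are illustrative, not the kernel's.

#include <stdio.h>

#define TABLE_SIZE 16	/* toy power-of-two table; the kernel sizes its table at boot */

struct mount_ent {
	struct mount_ent *next;	/* hash-chain link (kernel: hlist_node mnt_hash) */
	const void *parent;	/* parent mount this child is attached to */
	const void *mountpoint;	/* dentry inside the parent that it covers */
};

static unsigned long slot(const void *parent, const void *dentry)
{
	unsigned long tmp = (unsigned long)parent / 64 + (unsigned long)dentry / 64;

	return (tmp + (tmp >> 4)) & (TABLE_SIZE - 1);
}

/* Shape of __lookup_mnt(): scan the chain for (parent, dentry) and return
 * the first entry whose parent and mountpoint both match, else NULL. */
static struct mount_ent *lookup_child(struct mount_ent **table,
				      const void *parent, const void *dentry)
{
	struct mount_ent *p;

	for (p = table[slot(parent, dentry)]; p; p = p->next)
		if (p->parent == parent && p->mountpoint == dentry)
			return p;
	return NULL;
}

int main(void)
{
	struct mount_ent *table[TABLE_SIZE] = { NULL };
	int parent, dentry;	/* stand-in objects */
	struct mount_ent child = { NULL, &parent, &dentry };

	table[slot(&parent, &dentry)] = &child;
	printf("found: %s\n", lookup_child(table, &parent, &dentry) ? "yes" : "no");
	return 0;
}
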
669 struct mount *mnt; in __is_local_mountpoint() local
676 list_for_each_entry(mnt, &ns->list, mnt_list) { in __is_local_mountpoint()
677 is_covered = (mnt->mnt_mountpoint == dentry); in __is_local_mountpoint()
774 static inline int check_mnt(struct mount *mnt) in check_mnt() argument
776 return mnt->mnt_ns == current->nsproxy->mnt_ns; in check_mnt()
804 static struct mountpoint *unhash_mnt(struct mount *mnt) in unhash_mnt() argument
807 mnt->mnt_parent = mnt; in unhash_mnt()
808 mnt->mnt_mountpoint = mnt->mnt.mnt_root; in unhash_mnt()
809 list_del_init(&mnt->mnt_child); in unhash_mnt()
810 hlist_del_init_rcu(&mnt->mnt_hash); in unhash_mnt()
811 hlist_del_init(&mnt->mnt_mp_list); in unhash_mnt()
812 mp = mnt->mnt_mp; in unhash_mnt()
813 mnt->mnt_mp = NULL; in unhash_mnt()
820 static void umount_mnt(struct mount *mnt) in umount_mnt() argument
822 put_mountpoint(unhash_mnt(mnt)); in umount_mnt()
828 void mnt_set_mountpoint(struct mount *mnt, in mnt_set_mountpoint() argument
833 mnt_add_count(mnt, 1); /* essentially, that's mntget */ in mnt_set_mountpoint()
835 child_mnt->mnt_parent = mnt; in mnt_set_mountpoint()
840 static void __attach_mnt(struct mount *mnt, struct mount *parent) in __attach_mnt() argument
842 hlist_add_head_rcu(&mnt->mnt_hash, in __attach_mnt()
843 m_hash(&parent->mnt, mnt->mnt_mountpoint)); in __attach_mnt()
844 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); in __attach_mnt()
850 static void attach_mnt(struct mount *mnt, in attach_mnt() argument
854 mnt_set_mountpoint(parent, mp, mnt); in attach_mnt()
855 __attach_mnt(mnt, parent); in attach_mnt()
858 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt) in mnt_change_mountpoint() argument
860 struct mountpoint *old_mp = mnt->mnt_mp; in mnt_change_mountpoint()
861 struct mount *old_parent = mnt->mnt_parent; in mnt_change_mountpoint()
863 list_del_init(&mnt->mnt_child); in mnt_change_mountpoint()
864 hlist_del_init(&mnt->mnt_mp_list); in mnt_change_mountpoint()
865 hlist_del_init_rcu(&mnt->mnt_hash); in mnt_change_mountpoint()
867 attach_mnt(mnt, parent, mp); in mnt_change_mountpoint()
876 static void commit_tree(struct mount *mnt) in commit_tree() argument
878 struct mount *parent = mnt->mnt_parent; in commit_tree()
883 BUG_ON(parent == mnt); in commit_tree()
885 list_add_tail(&head, &mnt->mnt_list); in commit_tree()
894 __attach_mnt(mnt, parent); in commit_tree()
935 struct mount *mnt; in vfs_create_mount() local
940 mnt = alloc_vfsmnt(fc->source ?: "none"); in vfs_create_mount()
941 if (!mnt) in vfs_create_mount()
945 mnt->mnt.mnt_flags = MNT_INTERNAL; in vfs_create_mount()
948 mnt->mnt.mnt_sb = fc->root->d_sb; in vfs_create_mount()
949 mnt->mnt.mnt_root = dget(fc->root); in vfs_create_mount()
950 mnt->mnt_mountpoint = mnt->mnt.mnt_root; in vfs_create_mount()
951 mnt->mnt_parent = mnt; in vfs_create_mount()
954 list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts); in vfs_create_mount()
956 return &mnt->mnt; in vfs_create_mount()
976 struct vfsmount *mnt; in vfs_kern_mount() local
992 mnt = fc_mount(fc); in vfs_kern_mount()
994 mnt = ERR_PTR(ret); in vfs_kern_mount()
997 return mnt; in vfs_kern_mount()
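
vfs_kern_mount() (lines 976-997) wraps the fs_context API: build a context for the filesystem type, feed it parameters, then fc_mount() it into a vfsmount. A kernel-style sketch of that flow using the same API; everything except the fs_context calls themselves is a hypothetical name, and this is not standalone code.

#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/err.h>
#include <linux/string.h>

/* Sketch of the fs_context flow behind vfs_kern_mount(): create a context,
 * hand it a source string, mount it, drop the context. */
static struct vfsmount *my_mount_fs(struct file_system_type *type,
				    const char *source)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int err = 0;

	fc = fs_context_for_mount(type, 0);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (source)
		err = vfs_parse_fs_string(fc, "source", source, strlen(source));

	mnt = err ? ERR_PTR(err) : fc_mount(fc);	/* get the tree, wrap in a vfsmount */
	put_fs_context(fc);
	return mnt;
}
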
1019 struct super_block *sb = old->mnt.mnt_sb; in clone_mnt()
1020 struct mount *mnt; in clone_mnt() local
1023 mnt = alloc_vfsmnt(old->mnt_devname); in clone_mnt()
1024 if (!mnt) in clone_mnt()
1028 mnt->mnt_group_id = 0; /* not a peer of original */ in clone_mnt()
1030 mnt->mnt_group_id = old->mnt_group_id; in clone_mnt()
1032 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { in clone_mnt()
1033 err = mnt_alloc_group_id(mnt); in clone_mnt()
1038 mnt->mnt.mnt_flags = old->mnt.mnt_flags; in clone_mnt()
1039 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); in clone_mnt()
1042 mnt->mnt.mnt_sb = sb; in clone_mnt()
1043 mnt->mnt.mnt_root = dget(root); in clone_mnt()
1044 mnt->mnt_mountpoint = mnt->mnt.mnt_root; in clone_mnt()
1045 mnt->mnt_parent = mnt; in clone_mnt()
1047 list_add_tail(&mnt->mnt_instance, &sb->s_mounts); in clone_mnt()
1052 list_add(&mnt->mnt_slave, &old->mnt_slave_list); in clone_mnt()
1053 mnt->mnt_master = old; in clone_mnt()
1054 CLEAR_MNT_SHARED(mnt); in clone_mnt()
1057 list_add(&mnt->mnt_share, &old->mnt_share); in clone_mnt()
1059 list_add(&mnt->mnt_slave, &old->mnt_slave); in clone_mnt()
1060 mnt->mnt_master = old->mnt_master; in clone_mnt()
1062 CLEAR_MNT_SHARED(mnt); in clone_mnt()
1065 set_mnt_shared(mnt); in clone_mnt()
1071 list_add(&mnt->mnt_expire, &old->mnt_expire); in clone_mnt()
1074 return mnt; in clone_mnt()
1077 mnt_free_id(mnt); in clone_mnt()
1078 free_vfsmnt(mnt); in clone_mnt()
1082 static void cleanup_mnt(struct mount *mnt) in cleanup_mnt() argument
1093 WARN_ON(mnt_get_writers(mnt)); in cleanup_mnt()
1094 if (unlikely(mnt->mnt_pins.first)) in cleanup_mnt()
1095 mnt_pin_kill(mnt); in cleanup_mnt()
1096 hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) { in cleanup_mnt()
1098 mntput(&m->mnt); in cleanup_mnt()
1100 fsnotify_vfsmount_delete(&mnt->mnt); in cleanup_mnt()
1101 dput(mnt->mnt.mnt_root); in cleanup_mnt()
1102 deactivate_super(mnt->mnt.mnt_sb); in cleanup_mnt()
1103 mnt_free_id(mnt); in cleanup_mnt()
1104 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt); in cleanup_mnt()
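
cleanup_mnt() (lines 1082-1104) finishes by deferring the final free through call_rcu(), so lockless walkers that may still hold a pointer to the mount are guaranteed to be done before the memory is released. A kernel-style sketch of that deferred-free pattern with a hypothetical object type (again, not standalone code):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct my_obj {
	/* ... payload ... */
	struct rcu_head rcu;
};

static void my_obj_delayed_free(struct rcu_head *head)
{
	struct my_obj *obj = container_of(head, struct my_obj, rcu);

	kfree(obj);		/* safe: all RCU readers that could see obj are done */
}

static void my_obj_release(struct my_obj *obj)
{
	/* tear down non-RCU-visible state here, then defer the final free */
	call_rcu(&obj->rcu, my_obj_delayed_free);
}
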
1123 static void mntput_no_expire(struct mount *mnt) in mntput_no_expire() argument
1128 if (likely(READ_ONCE(mnt->mnt_ns))) { in mntput_no_expire()
1138 mnt_add_count(mnt, -1); in mntput_no_expire()
1148 mnt_add_count(mnt, -1); in mntput_no_expire()
1149 if (mnt_get_count(mnt)) { in mntput_no_expire()
1154 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { in mntput_no_expire()
1159 mnt->mnt.mnt_flags |= MNT_DOOMED; in mntput_no_expire()
1162 list_del(&mnt->mnt_instance); in mntput_no_expire()
1164 if (unlikely(!list_empty(&mnt->mnt_mounts))) { in mntput_no_expire()
1166 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { in mntput_no_expire()
1168 hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children); in mntput_no_expire()
1174 if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { in mntput_no_expire()
1177 init_task_work(&mnt->mnt_rcu, __cleanup_mnt); in mntput_no_expire()
1178 if (!task_work_add(task, &mnt->mnt_rcu, true)) in mntput_no_expire()
1181 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list)) in mntput_no_expire()
1185 cleanup_mnt(mnt); in mntput_no_expire()
1188 void mntput(struct vfsmount *mnt) in mntput() argument
1190 if (mnt) { in mntput()
1191 struct mount *m = real_mount(mnt); in mntput()
1200 struct vfsmount *mntget(struct vfsmount *mnt) in mntget() argument
1202 if (mnt) in mntget()
1203 mnt_add_count(real_mount(mnt), 1); in mntget()
1204 return mnt; in mntget()
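
mntget()/mntput() (lines 1188-1204) are the plain reference get/put pair on a vfsmount. A kernel-style sketch of typical usage, pinning the mount a file lives on across work that may sleep; the helper name is hypothetical.

#include <linux/fs.h>
#include <linux/mount.h>

/* Hypothetical helper: hold a reference on the file's mount for the
 * duration of some sleeping work, using the mntget()/mntput() pair. */
static void my_do_work_on_file(struct file *file)
{
	struct vfsmount *m = mntget(file->f_path.mnt);	/* +1 on the mount count */

	/* ... work that must not see the mount go away ... */

	mntput(m);					/* release the pin */
}
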
1240 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); in mnt_clone_internal()
1243 p->mnt.mnt_flags |= MNT_INTERNAL; in mnt_clone_internal()
1244 return &p->mnt; in mnt_clone_internal()
1288 return p->show(m, &r->mnt); in m_show()
1309 struct mount *mnt = real_mount(m); in may_umount_tree() local
1317 for (p = mnt; p; p = next_mnt(p, mnt)) { in may_umount_tree()
1344 int may_umount(struct vfsmount *mnt) in may_umount() argument
1349 if (propagate_mount_busy(real_mount(mnt), 2)) in may_umount()
1379 mntput(&m->mnt); in namespace_unlock()
1394 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) in disconnect_mount() argument
1401 if (!mnt_has_parent(mnt)) in disconnect_mount()
1408 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) in disconnect_mount()
1416 if (IS_MNT_LOCKED(mnt)) in disconnect_mount()
1427 static void umount_tree(struct mount *mnt, enum umount_tree_flags how) in umount_tree() argument
1433 propagate_mount_unlock(mnt); in umount_tree()
1436 for (p = mnt; p; p = next_mnt(p, mnt)) { in umount_tree()
1437 p->mnt.mnt_flags |= MNT_UMOUNT; in umount_tree()
1463 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; in umount_tree()
1481 static void shrink_submounts(struct mount *mnt);
1506 static int do_umount(struct mount *mnt, int flags) in do_umount() argument
1508 struct super_block *sb = mnt->mnt.mnt_sb; in do_umount()
1511 retval = security_sb_umount(&mnt->mnt, flags); in do_umount()
1522 if (&mnt->mnt == current->fs->root.mnt || in do_umount()
1531 if (mnt_get_count(mnt) != 2) { in do_umount()
1537 if (!xchg(&mnt->mnt_expiry_mark, 1)) in do_umount()
1564 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { in do_umount()
1579 if (mnt->mnt.mnt_flags & MNT_LOCKED) in do_umount()
1584 if (!list_empty(&mnt->mnt_list)) in do_umount()
1585 umount_tree(mnt, UMOUNT_PROPAGATE); in do_umount()
1588 shrink_submounts(mnt); in do_umount()
1590 if (!propagate_mount_busy(mnt, 2)) { in do_umount()
1591 if (!list_empty(&mnt->mnt_list)) in do_umount()
1592 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); in do_umount()
1615 struct mount *mnt; in __detach_mounts() local
1625 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); in __detach_mounts()
1626 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { in __detach_mounts()
1627 umount_mnt(mnt); in __detach_mounts()
1628 hlist_add_head(&mnt->mnt_umount, &unmounted); in __detach_mounts()
1630 else umount_tree(mnt, UMOUNT_CONNECTED); in __detach_mounts()
1670 struct mount *mnt; in ksys_umount() local
1686 mnt = real_mount(path.mnt); in ksys_umount()
1688 if (path.dentry != path.mnt->mnt_root) in ksys_umount()
1690 if (!check_mnt(mnt)) in ksys_umount()
1692 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ in ksys_umount()
1698 retval = do_umount(mnt, flags); in ksys_umount()
1702 mntput_no_expire(mnt); in ksys_umount()
1749 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, in copy_tree() argument
1754 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) in copy_tree()
1760 res = q = clone_mnt(mnt, dentry, flag); in copy_tree()
1764 q->mnt_mountpoint = mnt->mnt_mountpoint; in copy_tree()
1766 p = mnt; in copy_tree()
1767 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { in copy_tree()
1775 if (s->mnt.mnt_flags & MNT_LOCKED) { in copy_tree()
1785 is_mnt_ns_file(s->mnt.mnt_root)) { in copy_tree()
1795 q = clone_mnt(p, p->mnt.mnt_root, flag); in copy_tree()
1820 if (!check_mnt(real_mount(path->mnt))) in collect_mounts()
1823 tree = copy_tree(real_mount(path->mnt), path->dentry, in collect_mounts()
1828 return &tree->mnt; in collect_mounts()
1834 void dissolve_on_fput(struct vfsmount *mnt) in dissolve_on_fput() argument
1839 ns = real_mount(mnt)->mnt_ns; in dissolve_on_fput()
1842 umount_tree(real_mount(mnt), UMOUNT_CONNECTED); in dissolve_on_fput()
1852 void drop_collected_mounts(struct vfsmount *mnt) in drop_collected_mounts() argument
1856 umount_tree(real_mount(mnt), 0); in drop_collected_mounts()
1872 struct mount *old_mnt = real_mount(path->mnt); in clone_private_mount()
1882 return &new_mnt->mnt; in clone_private_mount()
1889 struct mount *mnt; in iterate_mounts() local
1893 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { in iterate_mounts()
1894 res = f(&mnt->mnt, arg); in iterate_mounts()
1901 static void lock_mnt_tree(struct mount *mnt) in lock_mnt_tree() argument
1905 for (p = mnt; p; p = next_mnt(p, mnt)) { in lock_mnt_tree()
1906 int flags = p->mnt.mnt_flags; in lock_mnt_tree()
1924 p->mnt.mnt_flags = flags; in lock_mnt_tree()
1928 static void cleanup_group_ids(struct mount *mnt, struct mount *end) in cleanup_group_ids() argument
1932 for (p = mnt; p != end; p = next_mnt(p, mnt)) { in cleanup_group_ids()
1938 static int invent_group_ids(struct mount *mnt, bool recurse) in invent_group_ids() argument
1942 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { in invent_group_ids()
1946 cleanup_group_ids(mnt, p); in invent_group_ids()
1955 int count_mounts(struct mnt_namespace *ns, struct mount *mnt) in count_mounts() argument
1961 for (p = mnt; p; p = next_mnt(p, mnt)) in count_mounts()
2056 smp = get_mountpoint(source_mnt->mnt.mnt_root); in attach_recursive_mnt()
2096 q = __lookup_mnt(&child->mnt_parent->mnt, in attach_recursive_mnt()
2103 child->mnt.mnt_flags &= ~MNT_LOCKED; in attach_recursive_mnt()
2131 struct vfsmount *mnt; in lock_mount() local
2140 mnt = lookup_mnt(path); in lock_mount()
2141 if (likely(!mnt)) { in lock_mount()
2153 path->mnt = mnt; in lock_mount()
2154 dentry = path->dentry = dget(mnt->mnt_root); in lock_mount()
2170 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) in graft_tree() argument
2172 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) in graft_tree()
2176 d_is_dir(mnt->mnt.mnt_root)) in graft_tree()
2179 return attach_recursive_mnt(mnt, p, mp, false); in graft_tree()
2205 struct mount *mnt = real_mount(path->mnt); in do_change_type() local
2210 if (path->dentry != path->mnt->mnt_root) in do_change_type()
2219 err = invent_group_ids(mnt, recurse); in do_change_type()
2225 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) in do_change_type()
2234 static bool has_locked_children(struct mount *mnt, struct dentry *dentry) in has_locked_children() argument
2237 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { in has_locked_children()
2241 if (child->mnt.mnt_flags & MNT_LOCKED) in has_locked_children()
2249 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt); in __do_loopback() local
2252 return mnt; in __do_loopback()
2255 return mnt; in __do_loopback()
2258 return mnt; in __do_loopback()
2261 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); in __do_loopback()
2263 mnt = clone_mnt(old, old_path->dentry, 0); in __do_loopback()
2265 if (!IS_ERR(mnt)) in __do_loopback()
2266 mnt->mnt.mnt_flags &= ~MNT_LOCKED; in __do_loopback()
2268 return mnt; in __do_loopback()
2278 struct mount *mnt = NULL, *parent; in do_loopback() local
2297 parent = real_mount(path->mnt); in do_loopback()
2301 mnt = __do_loopback(&old_path, recurse); in do_loopback()
2302 if (IS_ERR(mnt)) { in do_loopback()
2303 err = PTR_ERR(mnt); in do_loopback()
2307 err = graft_tree(mnt, parent, mp); in do_loopback()
2310 umount_tree(mnt, UMOUNT_SYNC); in do_loopback()
2324 struct mount *mnt, *p; in open_detached_copy() local
2331 mnt = __do_loopback(path, recursive); in open_detached_copy()
2332 if (IS_ERR(mnt)) { in open_detached_copy()
2335 return ERR_CAST(mnt); in open_detached_copy()
2339 for (p = mnt; p; p = next_mnt(p, mnt)) { in open_detached_copy()
2343 ns->root = mnt; in open_detached_copy()
2344 list_add_tail(&ns->list, &mnt->mnt_list); in open_detached_copy()
2345 mntget(&mnt->mnt); in open_detached_copy()
2349 mntput(path->mnt); in open_detached_copy()
2350 path->mnt = &mnt->mnt; in open_detached_copy()
2353 dissolve_on_fput(path->mnt); in open_detached_copy()
2416 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) in can_change_locked_flags() argument
2418 unsigned int fl = mnt->mnt.mnt_flags; in can_change_locked_flags()
2443 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) in change_mount_ro_state() argument
2447 if (readonly_request == __mnt_is_readonly(&mnt->mnt)) in change_mount_ro_state()
2451 return mnt_make_readonly(mnt); in change_mount_ro_state()
2453 return __mnt_unmake_readonly(mnt); in change_mount_ro_state()
2460 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) in set_mount_attributes() argument
2463 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; in set_mount_attributes()
2464 mnt->mnt.mnt_flags = mnt_flags; in set_mount_attributes()
2465 touch_mnt_namespace(mnt->mnt_ns); in set_mount_attributes()
2469 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt) in mnt_warn_timestamp_expiry() argument
2471 struct super_block *sb = mnt->mnt_sb; in mnt_warn_timestamp_expiry()
2473 if (!__mnt_is_readonly(mnt) && in mnt_warn_timestamp_expiry()
2483 is_mounted(mnt) ? "remounted" : "mounted", in mnt_warn_timestamp_expiry()
2498 struct super_block *sb = path->mnt->mnt_sb; in do_reconfigure_mnt()
2499 struct mount *mnt = real_mount(path->mnt); in do_reconfigure_mnt() local
2502 if (!check_mnt(mnt)) in do_reconfigure_mnt()
2505 if (path->dentry != mnt->mnt.mnt_root) in do_reconfigure_mnt()
2508 if (!can_change_locked_flags(mnt, mnt_flags)) in do_reconfigure_mnt()
2512 ret = change_mount_ro_state(mnt, mnt_flags); in do_reconfigure_mnt()
2514 set_mount_attributes(mnt, mnt_flags); in do_reconfigure_mnt()
2517 mnt_warn_timestamp_expiry(path, &mnt->mnt); in do_reconfigure_mnt()
2531 struct super_block *sb = path->mnt->mnt_sb; in do_remount()
2532 struct mount *mnt = real_mount(path->mnt); in do_remount() local
2535 if (!check_mnt(mnt)) in do_remount()
2538 if (path->dentry != path->mnt->mnt_root) in do_remount()
2541 if (!can_change_locked_flags(mnt, mnt_flags)) in do_remount()
2555 set_mount_attributes(mnt, mnt_flags); in do_remount()
2560 mnt_warn_timestamp_expiry(path, &mnt->mnt); in do_remount()
2566 static inline int tree_contains_unbindable(struct mount *mnt) in tree_contains_unbindable() argument
2569 for (p = mnt; p; p = next_mnt(p, mnt)) { in tree_contains_unbindable()
2589 if (mnt_ns_loop(p->mnt.mnt_root)) in check_for_nsfs_mounts()
2612 old = real_mount(old_path->mnt); in do_move_mount()
2613 p = real_mount(new_path->mnt); in do_move_mount()
2625 if (!is_mounted(&old->mnt)) in do_move_mount()
2632 if (old->mnt.mnt_flags & MNT_LOCKED) in do_move_mount()
2635 if (old_path->dentry != old_path->mnt->mnt_root) in do_move_mount()
2659 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, in do_move_mount()
2712 parent = real_mount(path->mnt); in do_add_mount()
2725 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && in do_add_mount()
2726 path->mnt->mnt_root == path->dentry) in do_add_mount()
2730 if (d_is_symlink(newmnt->mnt.mnt_root)) in do_add_mount()
2733 newmnt->mnt.mnt_flags = mnt_flags; in do_add_mount()
2750 struct vfsmount *mnt; in do_new_mount_fc() local
2765 mnt = vfs_create_mount(fc); in do_new_mount_fc()
2766 if (IS_ERR(mnt)) in do_new_mount_fc()
2767 return PTR_ERR(mnt); in do_new_mount_fc()
2769 mnt_warn_timestamp_expiry(mountpoint, mnt); in do_new_mount_fc()
2771 error = do_add_mount(real_mount(mnt), mountpoint, mnt_flags); in do_new_mount_fc()
2773 mntput(mnt); in do_new_mount_fc()
2832 struct mount *mnt = real_mount(m); in finish_automount() local
2837 BUG_ON(mnt_get_count(mnt) < 2); in finish_automount()
2839 if (m->mnt_sb == path->mnt->mnt_sb && in finish_automount()
2845 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); in finish_automount()
2850 if (!list_empty(&mnt->mnt_expire)) { in finish_automount()
2852 list_del_init(&mnt->mnt_expire); in finish_automount()
2865 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) in mnt_set_expiry() argument
2869 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); in mnt_set_expiry()
2882 struct mount *mnt, *next; in mark_mounts_for_expiry() local
2897 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { in mark_mounts_for_expiry()
2898 if (!xchg(&mnt->mnt_expiry_mark, 1) || in mark_mounts_for_expiry()
2899 propagate_mount_busy(mnt, 1)) in mark_mounts_for_expiry()
2901 list_move(&mnt->mnt_expire, &graveyard); in mark_mounts_for_expiry()
2904 mnt = list_first_entry(&graveyard, struct mount, mnt_expire); in mark_mounts_for_expiry()
2905 touch_mnt_namespace(mnt->mnt_ns); in mark_mounts_for_expiry()
2906 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); in mark_mounts_for_expiry()
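
mnt_set_expiry() and mark_mounts_for_expiry() (lines 2865-2906) implement mount expiry: submounts are registered on a per-filesystem list, and a periodic pass marks idle mounts and unmounts the ones already marked on the previous pass. A kernel-style sketch of how an automounting filesystem typically drives this pair; the list and function names are hypothetical.

#include <linux/mount.h>
#include <linux/list.h>

static LIST_HEAD(my_expiry_list);

/* When creating an automounted submount, register it for expiry. */
static void my_register_automount(struct vfsmount *m)
{
	mnt_set_expiry(m, &my_expiry_list);
}

/* Called periodically (e.g. from a delayed work item): the first pass marks
 * unused mounts, a later pass unmounts those still marked and still idle. */
static void my_expiry_tick(void)
{
	mark_mounts_for_expiry(&my_expiry_list);
}
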
2931 struct mount *mnt = list_entry(tmp, struct mount, mnt_child); in select_submounts() local
2934 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) in select_submounts()
2939 if (!list_empty(&mnt->mnt_mounts)) { in select_submounts()
2940 this_parent = mnt; in select_submounts()
2944 if (!propagate_mount_busy(mnt, 1)) { in select_submounts()
2945 list_move_tail(&mnt->mnt_expire, graveyard); in select_submounts()
2966 static void shrink_submounts(struct mount *mnt) in shrink_submounts() argument
2972 while (select_submounts(mnt, &graveyard)) { in shrink_submounts()
3118 mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; in do_mount()
3240 new = copy_tree(old, old->mnt.mnt_root, copy_flags); in copy_mnt_ns()
3265 if (&p->mnt == new_fs->root.mnt) { in copy_mnt_ns()
3266 new_fs->root.mnt = mntget(&q->mnt); in copy_mnt_ns()
3267 rootmnt = &p->mnt; in copy_mnt_ns()
3269 if (&p->mnt == new_fs->pwd.mnt) { in copy_mnt_ns()
3270 new_fs->pwd.mnt = mntget(&q->mnt); in copy_mnt_ns()
3271 pwdmnt = &p->mnt; in copy_mnt_ns()
3278 while (p->mnt.mnt_root != q->mnt.mnt_root) in copy_mnt_ns()
3293 struct mount *mnt = real_mount(m); in mount_subtree() local
3304 mnt->mnt_ns = ns; in mount_subtree()
3305 ns->root = mnt; in mount_subtree()
3307 list_add(&mnt->mnt_list, &ns->list); in mount_subtree()
3318 s = path.mnt->mnt_sb; in mount_subtree()
3320 mntput(path.mnt); in mount_subtree()
3379 struct mount *mnt; in SYSCALL_DEFINE3() local
3455 newmount.mnt = vfs_create_mount(fc); in SYSCALL_DEFINE3()
3456 if (IS_ERR(newmount.mnt)) { in SYSCALL_DEFINE3()
3457 ret = PTR_ERR(newmount.mnt); in SYSCALL_DEFINE3()
3461 newmount.mnt->mnt_flags = mnt_flags; in SYSCALL_DEFINE3()
3475 mnt = real_mount(newmount.mnt); in SYSCALL_DEFINE3()
3476 mnt->mnt_ns = ns; in SYSCALL_DEFINE3()
3477 ns->root = mnt; in SYSCALL_DEFINE3()
3479 list_add(&mnt->mnt_list, &ns->list); in SYSCALL_DEFINE3()
3480 mntget(newmount.mnt); in SYSCALL_DEFINE3()
3487 dissolve_on_fput(newmount.mnt); in SYSCALL_DEFINE3()
3571 bool is_path_reachable(struct mount *mnt, struct dentry *dentry, in is_path_reachable() argument
3574 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) { in is_path_reachable()
3575 dentry = mnt->mnt_mountpoint; in is_path_reachable()
3576 mnt = mnt->mnt_parent; in is_path_reachable()
3578 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry); in is_path_reachable()
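
is_path_reachable() (lines 3571-3578) decides whether a mount/dentry pair lies under a given root by climbing mnt_parent/mnt_mountpoint until it reaches either that root or a self-parented mount. A tiny runnable sketch of that climb (the dentry tracking and final is_subdir() check are omitted):

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *parent;	/* mnt_parent: the topmost mount is self-parented */
};

/* Shape of is_path_reachable(): climb the parent chain until we either hit
 * the chosen root (reachable) or a self-parented node (walked off the top). */
static bool reachable(struct node *n, const struct node *root)
{
	while (n != root && n->parent != n)
		n = n->parent;
	return n == root;
}

int main(void)
{
	struct node root, child;

	root.parent = &root;	/* self-parented top */
	child.parent = &root;
	printf("%d %d\n", reachable(&child, &root), reachable(&root, &child));
	return 0;
}
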
3585 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); in path_is_under()
3648 new_mnt = real_mount(new.mnt); in SYSCALL_DEFINE2()
3649 root_mnt = real_mount(root.mnt); in SYSCALL_DEFINE2()
3650 old_mnt = real_mount(old.mnt); in SYSCALL_DEFINE2()
3659 if (new_mnt->mnt.mnt_flags & MNT_LOCKED) in SYSCALL_DEFINE2()
3668 if (root.mnt->mnt_root != root.dentry) in SYSCALL_DEFINE2()
3672 if (new.mnt->mnt_root != new.dentry) in SYSCALL_DEFINE2()
3685 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { in SYSCALL_DEFINE2()
3686 new_mnt->mnt.mnt_flags |= MNT_LOCKED; in SYSCALL_DEFINE2()
3687 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED; in SYSCALL_DEFINE2()
3717 struct vfsmount *mnt; in init_mount_tree() local
3722 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL); in init_mount_tree()
3723 if (IS_ERR(mnt)) in init_mount_tree()
3729 m = real_mount(mnt); in init_mount_tree()
3737 root.mnt = mnt; in init_mount_tree()
3738 root.dentry = mnt->mnt_root; in init_mount_tree()
3739 mnt->mnt_flags |= MNT_LOCKED; in init_mount_tree()
3784 drop_collected_mounts(&ns->root->mnt); in put_mnt_ns()
3790 struct vfsmount *mnt; in kern_mount() local
3791 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL); in kern_mount()
3792 if (!IS_ERR(mnt)) { in kern_mount()
3797 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL; in kern_mount()
3799 return mnt; in kern_mount()
3803 void kern_unmount(struct vfsmount *mnt) in kern_unmount() argument
3806 if (!IS_ERR_OR_NULL(mnt)) { in kern_unmount()
3807 real_mount(mnt)->mnt_ns = NULL; in kern_unmount()
3809 mntput(mnt); in kern_unmount()
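
kern_mount()/kern_unmount() (lines 3790-3809) create and tear down internal, namespace-less mounts for kernel subsystems. A kernel-style sketch of the typical init/exit usage; my_fs_type and the function names are hypothetical, and the file_system_type must already be registered.

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/err.h>

extern struct file_system_type my_fs_type;	/* hypothetical, registered elsewhere */

static struct vfsmount *my_mnt;

static int my_subsys_init(void)
{
	my_mnt = kern_mount(&my_fs_type);
	if (IS_ERR(my_mnt))
		return PTR_ERR(my_mnt);
	return 0;
}

static void my_subsys_exit(void)
{
	kern_unmount(my_mnt);			/* drops the internal mount */
}
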
3814 bool our_mnt(struct vfsmount *mnt) in our_mnt() argument
3816 return check_mnt(real_mount(mnt)); in our_mnt()
3827 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt; in current_chrooted()
3828 ns_root.dentry = ns_root.mnt->mnt_root; in current_chrooted()
3848 struct mount *mnt; in mnt_already_visible() local
3852 list_for_each_entry(mnt, &ns->list, mnt_list) { in mnt_already_visible()
3856 if (mnt->mnt.mnt_sb->s_type != sb->s_type) in mnt_already_visible()
3862 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) in mnt_already_visible()
3866 mnt_flags = mnt->mnt.mnt_flags; in mnt_already_visible()
3869 if (sb_rdonly(mnt->mnt.mnt_sb)) in mnt_already_visible()
3886 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { in mnt_already_visible()
3889 if (!(child->mnt.mnt_flags & MNT_LOCKED)) in mnt_already_visible()
3930 bool mnt_may_suid(struct vfsmount *mnt) in mnt_may_suid() argument
3939 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) && in mnt_may_suid()
3940 current_in_userns(mnt->mnt_sb->s_user_ns); in mnt_may_suid()
3987 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt, in mntns_install()