// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/mm.h>

#include "inotify.h"

/*
 * Check if 2 events contain the same information.
 */
static bool event_compare(struct fsnotify_event *old_fsn,
			  struct fsnotify_event *new_fsn)
{
	struct inotify_event_info *old, *new;

	old = INOTIFY_E(old_fsn);
	new = INOTIFY_E(new_fsn);
	if (old->mask & FS_IN_IGNORED)
		return false;
	if ((old->mask == new->mask) &&
	    (old_fsn->inode == new_fsn->inode) &&
	    (old->name_len == new->name_len) &&
	    (!old->name_len || !strcmp(old->name, new->name)))
		return true;
	return false;
}

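/*
 * Merge callback for the notification queue: return non-zero if the new
 * event carries the same information as the last event already queued, so
 * identical back-to-back events are delivered to userspace only once.
 */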
static int inotify_merge(struct list_head *list,
			 struct fsnotify_event *event)
{
	struct fsnotify_event *last_event;

	last_event = list_entry(list->prev, struct fsnotify_event, list);
	return event_compare(last_event, event);
}

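/*
 * Turn an fsnotify event on a watched inode into an inotify_event_info and
 * queue it on the group's notification list: allocate the event (including
 * room for the file name), fill in the watch descriptor, mask and cookie,
 * and hand it to fsnotify_add_event() with inotify_merge() as the merge
 * callback.
 */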
int inotify_handle_event(struct fsnotify_group *group,
			 struct inode *inode,
			 u32 mask, const void *data, int data_type,
			 const struct qstr *file_name, u32 cookie,
			 struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info);
	struct inotify_inode_mark *i_mark;
	struct inotify_event_info *event;
	struct fsnotify_event *fsn_event;
	int ret;
	int len = 0;
	int alloc_len = sizeof(struct inotify_event_info);

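	/* inotify only ever attaches marks to inodes, never to vfsmounts */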
	if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info)))
		return 0;

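	/*
	 * A mark created with IN_EXCL_UNLINK asked not to receive events for
	 * children that have already been unlinked from the watched directory.
	 */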
	if ((inode_mark->mask & FS_EXCL_UNLINK) &&
	    (data_type == FSNOTIFY_EVENT_PATH)) {
		const struct path *path = data;

		if (d_unlinked(path->dentry))
			return 0;
	}
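	/* reserve room for the file name and its terminating NUL, if any */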
	if (file_name) {
		len = file_name->len;
		alloc_len += len + 1;
	}

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	i_mark = container_of(inode_mark, struct inotify_inode_mark,
			      fsn_mark);

	/*
	 * Whoever is interested in the event, pays for the allocation. Do not
	 * trigger the OOM killer in the target monitoring memcg as it may
	 * have security repercussions.
	 */
	memalloc_use_memcg(group->memcg);
	event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	memalloc_unuse_memcg();

	if (unlikely(!event)) {
		/*
		 * Treat lost event due to ENOMEM the same way as queue
		 * overflow to let userspace know event was lost.
		 */
		fsnotify_queue_overflow(group);
		return -ENOMEM;
	}

	/*
	 * We now report FS_ISDIR flag with MOVE_SELF and DELETE_SELF events
	 * for fanotify. inotify never reported IN_ISDIR with those events.
	 * It looks like an oversight, but to avoid the risk of breaking
	 * existing inotify programs, mask the flag out from those events.
	 */
	if (mask & (IN_MOVE_SELF | IN_DELETE_SELF))
		mask &= ~IN_ISDIR;

	fsn_event = &event->fse;
	fsnotify_init_event(fsn_event, inode);
	event->mask = mask;
	event->wd = i_mark->wd;
	event->sync_cookie = cookie;
	event->name_len = len;
	if (len)
		strcpy(event->name, file_name->name);

	ret = fsnotify_add_event(group, fsn_event, inotify_merge);
	if (ret) {
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);
	}

	if (inode_mark->mask & IN_ONESHOT)
		fsnotify_destroy_mark(inode_mark, group);

	return 0;
}

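/*
 * Called by fsnotify just before a mark is destroyed: send IN_IGNORED to
 * userspace and remove the mark's watch descriptor from the group's idr.
 */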
static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(fsn_mark, group);
}

/*
 * This is NEVER supposed to be called. Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down. This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	static bool warned = false;

	if (warned)
		return 0;

	warned = true;
	fsn_mark = p;
	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
		"idr. Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it. This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	if (fsn_mark)
		printk(KERN_WARNING "fsn_mark->group=%p wd=%d\n",
			fsn_mark->group, i_mark->wd);
	return 0;
}

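/*
 * Tear down the per-group inotify state: flag any marks unexpectedly left in
 * the idr, free the idr itself, and release this instance's slot in the
 * owner's inotify instance count.
 */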
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the BUG in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_destroy(&group->inotify_data.idr);
	if (group->inotify_data.ucounts)
		dec_inotify_instances(group->inotify_data.ucounts);
}

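/* free the inotify_event_info wrapping this fsnotify event */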
static void inotify_free_event(struct fsnotify_event *fsn_event)
{
	kfree(INOTIFY_E(fsn_event));
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

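/* fsnotify callbacks implemented by inotify */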
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event = inotify_free_event,
	.freeing_mark = inotify_freeing_mark,
	.free_mark = inotify_free_mark,
};