/*
 * memfd_create system call and file sealing support
 *
 * Code was originally included in shmem.c, and broken out to facilitate
 * use by hugetlbfs as well as tmpfs.
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/khugepaged.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/memfd.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/memfd.h>

/*
 * We need a tag: a new tag would expand every xa_node by 8 bytes,
 * so reuse a tag which we firmly believe is never set or cleared on tmpfs
 * or hugetlbfs because they are memory only filesystems.
 */
#define MEMFD_TAG_PINNED	PAGECACHE_TAG_TOWRITE
#define LAST_SCAN		4	/* about 150ms max */
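
/*
 * The "about 150ms" above is the worst-case sum of the killable sleeps in
 * memfd_wait_for_pins(): scans 1..LAST_SCAN each wait (HZ << scan) / 200
 * jiffies, i.e. roughly 10ms + 20ms + 40ms + 80ms = 150ms in total
 * (scan 0 drains the per-CPU LRU caches instead of sleeping).
 */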

static void memfd_tag_pins(struct xa_state *xas)
{
	struct page *page;
	int latency = 0;
	int cache_count;

	lru_add_drain();

	xas_lock_irq(xas);
	xas_for_each(xas, page, ULONG_MAX) {
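		/*
		 * A transparent huge page occupies HPAGE_PMD_NR slots in
		 * the page cache and holds one cache reference per slot,
		 * so that is its expected "unpinned" count; value entries
		 * (swap) are excluded by the !xa_is_value() checks.
		 */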
		cache_count = 1;
		if (!xa_is_value(page) &&
		    PageTransHuge(page) && !PageHuge(page))
			cache_count = HPAGE_PMD_NR;

		if (!xa_is_value(page) &&
		    page_count(page) - total_mapcount(page) != cache_count)
			xas_set_mark(xas, MEMFD_TAG_PINNED);
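		/* Advance past the tail slots of a huge page. */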
		if (cache_count != 1)
			xas_set(xas, page->index + cache_count);

		latency += cache_count;
		if (latency < XA_CHECK_SCHED)
			continue;
		latency = 0;

		xas_pause(xas);
		xas_unlock_irq(xas);
		cond_resched();
		xas_lock_irq(xas);
	}
	xas_unlock_irq(xas);
}

/*
 * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
 * via get_user_pages(), drivers might have some pending I/O without any active
 * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages
 * and see whether they have an elevated ref-count. If so, we tag them and wait
 * for them to be dropped.
 * The caller must guarantee that no new user will acquire writable references
 * to those pages to avoid races.
 */
static int memfd_wait_for_pins(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	struct page *page;
	int error, scan;

	memfd_tag_pins(&xas);

	error = 0;
	for (scan = 0; scan <= LAST_SCAN; scan++) {
		int latency = 0;
		int cache_count;

		if (!xas_marked(&xas, MEMFD_TAG_PINNED))
			break;

		if (!scan)
			lru_add_drain_all();
		else if (schedule_timeout_killable((HZ << scan) / 200))
			scan = LAST_SCAN;

		xas_set(&xas, 0);
		xas_lock_irq(&xas);
		xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
			bool clear = true;

			cache_count = 1;
			if (!xa_is_value(page) &&
			    PageTransHuge(page) && !PageHuge(page))
				cache_count = HPAGE_PMD_NR;

			if (!xa_is_value(page) && cache_count !=
			    page_count(page) - total_mapcount(page)) {
				/*
				 * On the last scan, we clean up all those tags
				 * we inserted; but make a note that we still
				 * found pages pinned.
				 */
				if (scan == LAST_SCAN)
					error = -EBUSY;
				else
					clear = false;
			}
			if (clear)
				xas_clear_mark(&xas, MEMFD_TAG_PINNED);

			latency += cache_count;
			if (latency < XA_CHECK_SCHED)
				continue;
			latency = 0;

			xas_pause(&xas);
			xas_unlock_irq(&xas);
			cond_resched();
			xas_lock_irq(&xas);
		}
		xas_unlock_irq(&xas);
	}

	return error;
}

static unsigned int *memfd_file_seals_ptr(struct file *file)
{
	if (shmem_file(file))
		return &SHMEM_I(file_inode(file))->seals;

#ifdef CONFIG_HUGETLBFS
	if (is_file_hugepages(file))
		return &HUGETLBFS_I(file_inode(file))->seals;
#endif

	return NULL;
}

#define F_ALL_SEALS (F_SEAL_SEAL | \
		     F_SEAL_EXEC | \
		     F_SEAL_SHRINK | \
		     F_SEAL_GROW | \
		     F_SEAL_WRITE | \
		     F_SEAL_FUTURE_WRITE)

static int memfd_add_seals(struct file *file, unsigned int seals)
{
	struct inode *inode = file_inode(file);
	unsigned int *file_seals;
	int error;

	/*
	 * SEALING
	 * Sealing allows multiple parties to share a tmpfs or hugetlbfs file
	 * but restrict access to a specific subset of file operations. Seals
	 * can only be added, but never removed. This way, mutually untrusted
	 * parties can share common memory regions with a well-defined policy.
	 * A malicious peer can thus never perform unwanted operations on a
	 * shared object.
	 *
	 * Seals are only supported on special tmpfs or hugetlbfs files and
	 * always affect the whole underlying inode. Once a seal is set, it
	 * may prevent some kinds of access to the file. Currently, the
	 * following seals are defined:
	 * SEAL_SEAL: Prevent further seals from being set on this file
	 * SEAL_SHRINK: Prevent the file from shrinking
	 * SEAL_GROW: Prevent the file from growing
	 * SEAL_WRITE: Prevent write access to the file
	 * SEAL_EXEC: Prevent modification of the exec bits in the file mode
	 *
	 * As we don't require any trust relationship between two parties, we
	 * must prevent seals from being removed. Therefore, sealing a file
	 * only adds a given set of seals to the file; it never touches
	 * existing seals. Furthermore, the "setting seals"-operation can be
	 * sealed itself, which basically prevents any further seal from being
	 * added.
	 *
	 * Semantics of sealing are only defined on volatile files. Only
	 * anonymous tmpfs and hugetlbfs files support sealing. More
	 * importantly, seals are never written to disk. Therefore, there's
	 * no plan to support it on other file types.
	 */
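
	/*
	 * Hedged userspace sketch (illustrative, not kernel code): a typical
	 * sealing sequence over this interface looks like
	 *
	 *   int fd = memfd_create("buf", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	 *   ftruncate(fd, size);
	 *   ... fill the buffer ...
	 *   fcntl(fd, F_ADD_SEALS,
	 *         F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);
	 *
	 * after which the fd can be handed to an untrusted peer, which can
	 * map and read it but can no longer resize it or change its contents.
	 */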

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;
	if (seals & ~(unsigned int)F_ALL_SEALS)
		return -EINVAL;

	inode_lock(inode);

	file_seals = memfd_file_seals_ptr(file);
	if (!file_seals) {
		error = -EINVAL;
		goto unlock;
	}

	if (*file_seals & F_SEAL_SEAL) {
		error = -EPERM;
		goto unlock;
	}

	if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) {
		error = mapping_deny_writable(file->f_mapping);
		if (error)
			goto unlock;

		error = memfd_wait_for_pins(file->f_mapping);
		if (error) {
			mapping_allow_writable(file->f_mapping);
			goto unlock;
		}
	}

	/*
	 * SEAL_EXEC implies SEAL_WRITE, making W^X from the start.
	 */
	if (seals & F_SEAL_EXEC && inode->i_mode & 0111)
		seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE;

	*file_seals |= seals;
	error = 0;

unlock:
	inode_unlock(inode);
	return error;
}

static int memfd_get_seals(struct file *file)
{
	unsigned int *seals = memfd_file_seals_ptr(file);

	return seals ? *seals : -EINVAL;
}

long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
	long error;

	switch (cmd) {
	case F_ADD_SEALS:
		error = memfd_add_seals(file, arg);
		break;
	case F_GET_SEALS:
		error = memfd_get_seals(file);
		break;
	default:
		error = -EINVAL;
		break;
	}

	return error;
}
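
/*
 * Userspace reaches the two commands above through fcntl(2); a hedged
 * sketch of both calls:
 *
 *   fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW);
 *   int seals = fcntl(fd, F_GET_SEALS);
 *
 * F_GET_SEALS returns the current seal mask; both commands fail with
 * EINVAL on files that do not support sealing.
 */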

#define MFD_NAME_PREFIX "memfd:"
#define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
#define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)

#define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_NOEXEC_SEAL | MFD_EXEC)

static int check_sysctl_memfd_noexec(unsigned int *flags)
{
#ifdef CONFIG_SYSCTL
	struct pid_namespace *ns = task_active_pid_ns(current);
	int sysctl = pidns_memfd_noexec_scope(ns);

	if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
		if (sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL)
			*flags |= MFD_NOEXEC_SEAL;
		else
			*flags |= MFD_EXEC;
	}

	if (!(*flags & MFD_NOEXEC_SEAL) && sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) {
		pr_err_ratelimited(
			"%s[%d]: memfd_create() requires MFD_NOEXEC_SEAL with vm.memfd_noexec=%d\n",
			current->comm, task_pid_nr(current), sysctl);
		return -EACCES;
	}
#endif
	return 0;
}
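
/*
 * Illustration (assumes the documented vm.memfd_noexec scopes): with
 * vm.memfd_noexec=1 in the caller's pid namespace, a memfd_create() call
 * passing neither MFD_EXEC nor MFD_NOEXEC_SEAL is upgraded to
 * MFD_NOEXEC_SEAL above; with vm.memfd_noexec=2, any call without
 * MFD_NOEXEC_SEAL fails, e.g.:
 *
 *   # sysctl vm.memfd_noexec=2
 *   memfd_create("x", MFD_CLOEXEC)   returns -1, errno == EACCES
 */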

SYSCALL_DEFINE2(memfd_create,
		const char __user *, uname,
		unsigned int, flags)
{
	unsigned int *file_seals;
	struct file *file;
	int fd, error;
	char *name;
	long len;

	if (!(flags & MFD_HUGETLB)) {
		if (flags & ~(unsigned int)MFD_ALL_FLAGS)
			return -EINVAL;
	} else {
		/* Allow huge page size encoding in flags. */
		if (flags & ~(unsigned int)(MFD_ALL_FLAGS |
				(MFD_HUGE_MASK << MFD_HUGE_SHIFT)))
			return -EINVAL;
	}

	/* Invalid if both EXEC and NOEXEC_SEAL are set. */
	if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL))
		return -EINVAL;

	if (!(flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
		pr_warn_once(
			"%s[%d]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set\n",
			current->comm, task_pid_nr(current));
	}

	error = check_sysctl_memfd_noexec(&flags);
	if (error < 0)
		return error;

	/* length includes terminating zero */
	len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
	if (len <= 0)
		return -EFAULT;
	if (len > MFD_NAME_MAX_LEN + 1)
		return -EINVAL;

	name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	strcpy(name, MFD_NAME_PREFIX);
	if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
		error = -EFAULT;
		goto err_name;
	}

	/* terminating-zero may have changed after strnlen_user() returned */
	if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
		error = -EFAULT;
		goto err_name;
	}

	fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
	if (fd < 0) {
		error = fd;
		goto err_name;
	}

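	/*
	 * Hedged example: MFD_HUGETLB callers may additionally encode a huge
	 * page size in the high flag bits, decoded below via
	 * (flags >> MFD_HUGE_SHIFT) & MFD_HUGE_MASK, e.g.
	 *
	 *   memfd_create("pool", MFD_HUGETLB | MFD_HUGE_2MB);
	 */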
	if (flags & MFD_HUGETLB) {
		file = hugetlb_file_setup(name, 0, VM_NORESERVE,
					HUGETLB_ANONHUGE_INODE,
					(flags >> MFD_HUGE_SHIFT) &
					MFD_HUGE_MASK);
	} else
		file = shmem_file_setup(name, 0, VM_NORESERVE);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_fd;
	}
	file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
	file->f_flags |= O_LARGEFILE;

	if (flags & MFD_NOEXEC_SEAL) {
		struct inode *inode = file_inode(file);

		inode->i_mode &= ~0111;
		file_seals = memfd_file_seals_ptr(file);
		if (file_seals) {
			*file_seals &= ~F_SEAL_SEAL;
			*file_seals |= F_SEAL_EXEC;
		}
	} else if (flags & MFD_ALLOW_SEALING) {
		/* MFD_EXEC and MFD_ALLOW_SEALING are set */
		file_seals = memfd_file_seals_ptr(file);
		if (file_seals)
			*file_seals &= ~F_SEAL_SEAL;
	}

	fd_install(fd, file);
	kfree(name);
	return fd;

err_fd:
	put_unused_fd(fd);
err_name:
	kfree(name);
	return error;
}