1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/ipc/shm.c
4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
5 * Many improvements/fixes by Bruno Haible.
6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
8 *
9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Better ipc lock (kern_ipc_perm.lock) handling
25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
26 */
27
28 #include <linux/slab.h>
29 #include <linux/mm.h>
30 #include <linux/hugetlb.h>
31 #include <linux/shm.h>
32 #include <linux/init.h>
33 #include <linux/file.h>
34 #include <linux/mman.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/security.h>
37 #include <linux/syscalls.h>
38 #include <linux/audit.h>
39 #include <linux/capability.h>
40 #include <linux/ptrace.h>
41 #include <linux/seq_file.h>
42 #include <linux/rwsem.h>
43 #include <linux/nsproxy.h>
44 #include <linux/mount.h>
45 #include <linux/ipc_namespace.h>
46 #include <linux/rhashtable.h>
47
48 #include <linux/uaccess.h>
49
50 #include "util.h"
51
52 struct shmid_kernel /* private to the kernel */
53 {
54 struct kern_ipc_perm shm_perm;
55 struct file *shm_file;
56 unsigned long shm_nattch;
57 unsigned long shm_segsz;
58 time64_t shm_atim;
59 time64_t shm_dtim;
60 time64_t shm_ctim;
61 struct pid *shm_cprid;
62 struct pid *shm_lprid;
63 struct user_struct *mlock_user;
64
65 /* The task that created the shm object; NULL if the task is dead. */
66 struct task_struct *shm_creator;
67 struct list_head shm_clist; /* list by creator */
68 } __randomize_layout;
69
70 /* shm_mode upper byte flags */
71 #define SHM_DEST 01000 /* segment will be destroyed on last detach */
72 #define SHM_LOCKED 02000 /* segment will not be swapped */
73
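/*
 * Per-attach state, kept in ->private_data of the wrapper file created by
 * do_shmat(): the shm id, the owning ipc namespace, a reference to the
 * backing shmem/hugetlbfs file, and that file's vm_ops as saved by shm_mmap().
 */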
74 struct shm_file_data {
75 int id;
76 struct ipc_namespace *ns;
77 struct file *file;
78 const struct vm_operations_struct *vm_ops;
79 };
80
81 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
82
83 static const struct file_operations shm_file_operations;
84 static const struct vm_operations_struct shm_vm_ops;
85
86 #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
87
88 #define shm_unlock(shp) \
89 ipc_unlock(&(shp)->shm_perm)
90
91 static int newseg(struct ipc_namespace *, struct ipc_params *);
92 static void shm_open(struct vm_area_struct *vma);
93 static void shm_close(struct vm_area_struct *vma);
94 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
95 #ifdef CONFIG_PROC_FS
96 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
97 #endif
98
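/*
 * Initialise a namespace's shm limits to the compile-time defaults and set
 * up its IPC id table.
 */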
99 void shm_init_ns(struct ipc_namespace *ns)
100 {
101 ns->shm_ctlmax = SHMMAX;
102 ns->shm_ctlall = SHMALL;
103 ns->shm_ctlmni = SHMMNI;
104 ns->shm_rmid_forced = 0;
105 ns->shm_tot = 0;
106 ipc_init_ids(&shm_ids(ns));
107 }
108
109 /*
110 * Called with shm_ids.rwsem (writer) and the shp structure locked.
111 * Only shm_ids.rwsem remains locked on exit.
112 */
113 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
114 {
115 struct shmid_kernel *shp;
116
117 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
118
119 if (shp->shm_nattch) {
120 shp->shm_perm.mode |= SHM_DEST;
121 /* Do not find it any more */
122 ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
123 shm_unlock(shp);
124 } else
125 shm_destroy(ns, shp);
126 }
127
128 #ifdef CONFIG_IPC_NS
129 void shm_exit_ns(struct ipc_namespace *ns)
130 {
131 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
132 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
133 rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
134 }
135 #endif
136
137 static int __init ipc_ns_init(void)
138 {
139 shm_init_ns(&init_ipc_ns);
140 return 0;
141 }
142
143 pure_initcall(ipc_ns_init);
144
145 void __init shm_init(void)
146 {
147 ipc_init_proc_interface("sysvipc/shm",
148 #if BITS_PER_LONG <= 32
149 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
150 #else
151 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
152 #endif
153 IPC_SHM_IDS, sysvipc_shm_proc_show);
154 }
155
156 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
157 {
158 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
159
160 if (IS_ERR(ipcp))
161 return ERR_CAST(ipcp);
162
163 return container_of(ipcp, struct shmid_kernel, shm_perm);
164 }
165
166 static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
167 {
168 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
169
170 if (IS_ERR(ipcp))
171 return ERR_CAST(ipcp);
172
173 return container_of(ipcp, struct shmid_kernel, shm_perm);
174 }
175
176 /*
177 * shm_lock_(check_) routines are called in the paths where the rwsem
178 * is not necessarily held.
179 */
180 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
181 {
182 struct kern_ipc_perm *ipcp;
183
184 rcu_read_lock();
185 ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
186 if (IS_ERR(ipcp))
187 goto err;
188
189 ipc_lock_object(ipcp);
190 /*
191 * ipc_rmid() may have already freed the ID while ipc_lock_object()
192 * was spinning: here verify that the structure is still valid.
193 * Upon races with RMID, return -EIDRM, thus indicating that
194 * the ID points to a removed identifier.
195 */
196 if (ipc_valid_object(ipcp)) {
197 /* return a locked ipc object upon success */
198 return container_of(ipcp, struct shmid_kernel, shm_perm);
199 }
200
201 ipc_unlock_object(ipcp);
202 ipcp = ERR_PTR(-EIDRM);
203 err:
204 rcu_read_unlock();
205 /*
206 * Callers of shm_lock() must validate the status of the returned ipc
207 * object pointer and error out as appropriate.
208 */
209 return ERR_CAST(ipcp);
210 }
211
212 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
213 {
214 rcu_read_lock();
215 ipc_lock_object(&ipcp->shm_perm);
216 }
217
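/*
 * Final RCU callback: release the security blob, then the shmid_kernel
 * allocation itself.
 */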
218 static void shm_rcu_free(struct rcu_head *head)
219 {
220 struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
221 rcu);
222 struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
223 shm_perm);
224 security_shm_free(&shp->shm_perm);
225 kvfree(shp);
226 }
227
228 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
229 {
230 list_del(&s->shm_clist);
231 ipc_rmid(&shm_ids(ns), &s->shm_perm);
232 }
233
234
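/*
 * Attach-side bookkeeping for a new mapping: update atime and the last pid,
 * and bump shm_nattch. Returns -EINVAL if the id was reused since the
 * caller looked up the shm file.
 */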
235 static int __shm_open(struct vm_area_struct *vma)
236 {
237 struct file *file = vma->vm_file;
238 struct shm_file_data *sfd = shm_file_data(file);
239 struct shmid_kernel *shp;
240
241 shp = shm_lock(sfd->ns, sfd->id);
242
243 if (IS_ERR(shp))
244 return PTR_ERR(shp);
245
246 if (shp->shm_file != sfd->file) {
247 /* ID was reused */
248 shm_unlock(shp);
249 return -EINVAL;
250 }
251
252 shp->shm_atim = ktime_get_real_seconds();
253 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
254 shp->shm_nattch++;
255 shm_unlock(shp);
256 return 0;
257 }
258
259 /* This is called by fork, once for every shm attach. */
260 static void shm_open(struct vm_area_struct *vma)
261 {
262 int err = __shm_open(vma);
263 /*
264 * We raced in the idr lookup or with shm_destroy().
265 * Either way, the ID is busted.
266 */
267 WARN_ON_ONCE(err);
268 }
269
270 /*
271 * shm_destroy - free the struct shmid_kernel
272 *
273 * @ns: namespace
274 * @shp: struct to free
275 *
276 * It has to be called with shp and shm_ids.rwsem (writer) locked,
277 * but returns with shp unlocked and freed.
278 */
279 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
280 {
281 struct file *shm_file;
282
283 shm_file = shp->shm_file;
284 shp->shm_file = NULL;
285 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
286 shm_rmid(ns, shp);
287 shm_unlock(shp);
288 if (!is_file_hugepages(shm_file))
289 shmem_lock(shm_file, 0, shp->mlock_user);
290 else if (shp->mlock_user)
291 user_shm_unlock(i_size_read(file_inode(shm_file)),
292 shp->mlock_user);
293 fput(shm_file);
294 ipc_update_pid(&shp->shm_cprid, NULL);
295 ipc_update_pid(&shp->shm_lprid, NULL);
296 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
297 }
298
299 /*
300 * shm_may_destroy - identifies whether shm segment should be destroyed now
301 *
302 * Returns true if and only if there are no active users of the segment and
303 * one of the following is true:
304 *
305 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
306 *
307 * 2) sysctl kernel.shm_rmid_forced is set to 1.
308 */
309 static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
310 {
311 return (shp->shm_nattch == 0) &&
312 (ns->shm_rmid_forced ||
313 (shp->shm_perm.mode & SHM_DEST));
314 }
315
316 /*
317 * remove the attach descriptor vma.
318 * free memory for segment if it is marked destroyed.
319 * The descriptor has already been removed from the current->mm->mmap list
320 * and will later be kfree()d.
321 */
322 static void shm_close(struct vm_area_struct *vma)
323 {
324 struct file *file = vma->vm_file;
325 struct shm_file_data *sfd = shm_file_data(file);
326 struct shmid_kernel *shp;
327 struct ipc_namespace *ns = sfd->ns;
328
329 down_write(&shm_ids(ns).rwsem);
330 /* remove from the list of attaches of the shm segment */
331 shp = shm_lock(ns, sfd->id);
332
333 /*
334 * We raced in the idr lookup or with shm_destroy().
335 * Either way, the ID is busted.
336 */
337 if (WARN_ON_ONCE(IS_ERR(shp)))
338 goto done; /* no-op */
339
340 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
341 shp->shm_dtim = ktime_get_real_seconds();
342 shp->shm_nattch--;
343 if (shm_may_destroy(ns, shp))
344 shm_destroy(ns, shp);
345 else
346 shm_unlock(shp);
347 done:
348 up_write(&shm_ids(ns).rwsem);
349 }
350
351 /* Called with ns->shm_ids(ns).rwsem locked */
352 static int shm_try_destroy_orphaned(int id, void *p, void *data)
353 {
354 struct ipc_namespace *ns = data;
355 struct kern_ipc_perm *ipcp = p;
356 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
357
358 /*
359 * We want to destroy segments that have no users and whose
360 * creating process has already exited.
361 *
362 * As shp->* are changed under rwsem, it's safe to skip shp locking.
363 */
364 if (shp->shm_creator != NULL)
365 return 0;
366
367 if (shm_may_destroy(ns, shp)) {
368 shm_lock_by_ptr(shp);
369 shm_destroy(ns, shp);
370 }
371 return 0;
372 }
373
374 void shm_destroy_orphaned(struct ipc_namespace *ns)
375 {
376 down_write(&shm_ids(ns).rwsem);
377 if (shm_ids(ns).in_use)
378 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
379 up_write(&shm_ids(ns).rwsem);
380 }
381
382 /* Locking assumes this will only be called with task == current */
383 void exit_shm(struct task_struct *task)
384 {
385 struct ipc_namespace *ns = task->nsproxy->ipc_ns;
386 struct shmid_kernel *shp, *n;
387
388 if (list_empty(&task->sysvshm.shm_clist))
389 return;
390
391 /*
392 * If kernel.shm_rmid_forced is not set then only keep track of
393 * which shmids are orphaned, so that a later set of the sysctl
394 * can clean them up.
395 */
396 if (!ns->shm_rmid_forced) {
397 down_read(&shm_ids(ns).rwsem);
398 list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
399 shp->shm_creator = NULL;
400 /*
401 * We only hold the read lock, but since we are only called for
402 * current, no entry on the list can be shared with another task.
403 */
404 list_del(&task->sysvshm.shm_clist);
405 up_read(&shm_ids(ns).rwsem);
406 return;
407 }
408
409 /*
410 * Destroy all already created segments that are not yet mapped,
411 * and mark any mapped ones as orphaned to cover the sysctl toggling.
412 * Destroy is skipped if shm_may_destroy() returns false.
413 */
414 down_write(&shm_ids(ns).rwsem);
415 list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
416 shp->shm_creator = NULL;
417
418 if (shm_may_destroy(ns, shp)) {
419 shm_lock_by_ptr(shp);
420 shm_destroy(ns, shp);
421 }
422 }
423
424 /* Remove the list head from any segments still attached. */
425 list_del(&task->sysvshm.shm_clist);
426 up_write(&shm_ids(ns).rwsem);
427 }
428
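/*
 * The vm_operations below mostly forward to the vm_ops of the backing
 * shmem/hugetlbfs file that shm_mmap() saved in the shm_file_data.
 */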
429 static vm_fault_t shm_fault(struct vm_fault *vmf)
430 {
431 struct file *file = vmf->vma->vm_file;
432 struct shm_file_data *sfd = shm_file_data(file);
433
434 return sfd->vm_ops->fault(vmf);
435 }
436
437 static int shm_split(struct vm_area_struct *vma, unsigned long addr)
438 {
439 struct file *file = vma->vm_file;
440 struct shm_file_data *sfd = shm_file_data(file);
441
442 if (sfd->vm_ops->split)
443 return sfd->vm_ops->split(vma, addr);
444
445 return 0;
446 }
447
448 static unsigned long shm_pagesize(struct vm_area_struct *vma)
449 {
450 struct file *file = vma->vm_file;
451 struct shm_file_data *sfd = shm_file_data(file);
452
453 if (sfd->vm_ops->pagesize)
454 return sfd->vm_ops->pagesize(vma);
455
456 return PAGE_SIZE;
457 }
458
459 #ifdef CONFIG_NUMA
460 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
461 {
462 struct file *file = vma->vm_file;
463 struct shm_file_data *sfd = shm_file_data(file);
464 int err = 0;
465
466 if (sfd->vm_ops->set_policy)
467 err = sfd->vm_ops->set_policy(vma, new);
468 return err;
469 }
470
471 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
472 unsigned long addr)
473 {
474 struct file *file = vma->vm_file;
475 struct shm_file_data *sfd = shm_file_data(file);
476 struct mempolicy *pol = NULL;
477
478 if (sfd->vm_ops->get_policy)
479 pol = sfd->vm_ops->get_policy(vma, addr);
480 else if (vma->vm_policy)
481 pol = vma->vm_policy;
482
483 return pol;
484 }
485 #endif
486
487 static int shm_mmap(struct file *file, struct vm_area_struct *vma)
488 {
489 struct shm_file_data *sfd = shm_file_data(file);
490 int ret;
491
492 /*
493 * In case of remap_file_pages() emulation, the file can represent an
494 * IPC ID that was removed, and possibly even reused by another shm
495 * segment already. Propagate this case as an error to caller.
496 */
497 ret = __shm_open(vma);
498 if (ret)
499 return ret;
500
501 ret = call_mmap(sfd->file, vma);
502 if (ret) {
503 shm_close(vma);
504 return ret;
505 }
506 sfd->vm_ops = vma->vm_ops;
507 #ifdef CONFIG_MMU
508 WARN_ON(!sfd->vm_ops->fault);
509 #endif
510 vma->vm_ops = &shm_vm_ops;
511 return 0;
512 }
513
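/*
 * ->release for the wrapper file: drop the namespace and backing-file
 * references taken in do_shmat() and free the shm_file_data.
 */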
514 static int shm_release(struct inode *ino, struct file *file)
515 {
516 struct shm_file_data *sfd = shm_file_data(file);
517
518 put_ipc_ns(sfd->ns);
519 fput(sfd->file);
520 shm_file_data(file) = NULL;
521 kfree(sfd);
522 return 0;
523 }
524
525 static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
526 {
527 struct shm_file_data *sfd = shm_file_data(file);
528
529 if (!sfd->file->f_op->fsync)
530 return -EINVAL;
531 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
532 }
533
534 static long shm_fallocate(struct file *file, int mode, loff_t offset,
535 loff_t len)
536 {
537 struct shm_file_data *sfd = shm_file_data(file);
538
539 if (!sfd->file->f_op->fallocate)
540 return -EOPNOTSUPP;
541 return sfd->file->f_op->fallocate(file, mode, offset, len);
542 }
543
544 static unsigned long shm_get_unmapped_area(struct file *file,
545 unsigned long addr, unsigned long len, unsigned long pgoff,
546 unsigned long flags)
547 {
548 struct shm_file_data *sfd = shm_file_data(file);
549
550 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
551 pgoff, flags);
552 }
553
554 static const struct file_operations shm_file_operations = {
555 .mmap = shm_mmap,
556 .fsync = shm_fsync,
557 .release = shm_release,
558 .get_unmapped_area = shm_get_unmapped_area,
559 .llseek = noop_llseek,
560 .fallocate = shm_fallocate,
561 };
562
563 /*
564 * shm_file_operations_huge is now identical to shm_file_operations,
565 * but we keep it distinct for the sake of is_file_shm_hugepages().
566 */
567 static const struct file_operations shm_file_operations_huge = {
568 .mmap = shm_mmap,
569 .fsync = shm_fsync,
570 .release = shm_release,
571 .get_unmapped_area = shm_get_unmapped_area,
572 .llseek = noop_llseek,
573 .fallocate = shm_fallocate,
574 };
575
576 bool is_file_shm_hugepages(struct file *file)
577 {
578 return file->f_op == &shm_file_operations_huge;
579 }
580
581 static const struct vm_operations_struct shm_vm_ops = {
582 .open = shm_open, /* callback for a new vm-area open */
583 .close = shm_close, /* callback for when the vm-area is released */
584 .fault = shm_fault,
585 .split = shm_split,
586 .pagesize = shm_pagesize,
587 #if defined(CONFIG_NUMA)
588 .set_policy = shm_set_policy,
589 .get_policy = shm_get_policy,
590 #endif
591 };
592
593 /**
594 * newseg - Create a new shared memory segment
595 * @ns: namespace
596 * @params: ptr to the structure that contains key, size and shmflg
597 *
598 * Called with shm_ids.rwsem held as a writer.
599 */
600 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
601 {
602 key_t key = params->key;
603 int shmflg = params->flg;
604 size_t size = params->u.size;
605 int error;
606 struct shmid_kernel *shp;
607 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
608 struct file *file;
609 char name[13];
610 vm_flags_t acctflag = 0;
611
612 if (size < SHMMIN || size > ns->shm_ctlmax)
613 return -EINVAL;
614
615 if (numpages << PAGE_SHIFT < size)
616 return -ENOSPC;
617
618 if (ns->shm_tot + numpages < ns->shm_tot ||
619 ns->shm_tot + numpages > ns->shm_ctlall)
620 return -ENOSPC;
621
622 shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
623 if (unlikely(!shp))
624 return -ENOMEM;
625
626 shp->shm_perm.key = key;
627 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
628 shp->mlock_user = NULL;
629
630 shp->shm_perm.security = NULL;
631 error = security_shm_alloc(&shp->shm_perm);
632 if (error) {
633 kvfree(shp);
634 return error;
635 }
636
637 sprintf(name, "SYSV%08x", key);
638 if (shmflg & SHM_HUGETLB) {
639 struct hstate *hs;
640 size_t hugesize;
641
642 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
643 if (!hs) {
644 error = -EINVAL;
645 goto no_file;
646 }
647 hugesize = ALIGN(size, huge_page_size(hs));
648
649 /* hugetlb_file_setup applies strict accounting */
650 if (shmflg & SHM_NORESERVE)
651 acctflag = VM_NORESERVE;
652 file = hugetlb_file_setup(name, hugesize, acctflag,
653 &shp->mlock_user, HUGETLB_SHMFS_INODE,
654 (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
655 } else {
656 /*
657 * Do not allow unaccounted (SHM_NORESERVE) allocations when
658 * OVERCOMMIT_NEVER is in effect, even if asked for.
659 */
660 if ((shmflg & SHM_NORESERVE) &&
661 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
662 acctflag = VM_NORESERVE;
663 file = shmem_kernel_file_setup(name, size, acctflag);
664 }
665 error = PTR_ERR(file);
666 if (IS_ERR(file))
667 goto no_file;
668
669 shp->shm_cprid = get_pid(task_tgid(current));
670 shp->shm_lprid = NULL;
671 shp->shm_atim = shp->shm_dtim = 0;
672 shp->shm_ctim = ktime_get_real_seconds();
673 shp->shm_segsz = size;
674 shp->shm_nattch = 0;
675 shp->shm_file = file;
676 shp->shm_creator = current;
677
678 /* ipc_addid() locks shp upon success. */
679 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
680 if (error < 0)
681 goto no_id;
682
683 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
684
685 /*
686 * shmid gets reported as "inode#" in /proc/pid/maps.
687 * proc-ps tools use this. Changing this will break them.
688 */
689 file_inode(file)->i_ino = shp->shm_perm.id;
690
691 ns->shm_tot += numpages;
692 error = shp->shm_perm.id;
693
694 ipc_unlock_object(&shp->shm_perm);
695 rcu_read_unlock();
696 return error;
697
698 no_id:
699 ipc_update_pid(&shp->shm_cprid, NULL);
700 ipc_update_pid(&shp->shm_lprid, NULL);
701 if (is_file_hugepages(file) && shp->mlock_user)
702 user_shm_unlock(size, shp->mlock_user);
703 fput(file);
704 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
705 return error;
706 no_file:
707 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
708 return error;
709 }
710
711 /*
712 * Called with shm_ids.rwsem and ipcp locked.
713 */
714 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
715 struct ipc_params *params)
716 {
717 struct shmid_kernel *shp;
718
719 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
720 if (shp->shm_segsz < params->u.size)
721 return -EINVAL;
722
723 return 0;
724 }
725
726 long ksys_shmget(key_t key, size_t size, int shmflg)
727 {
728 struct ipc_namespace *ns;
729 static const struct ipc_ops shm_ops = {
730 .getnew = newseg,
731 .associate = security_shm_associate,
732 .more_checks = shm_more_checks,
733 };
734 struct ipc_params shm_params;
735
736 ns = current->nsproxy->ipc_ns;
737
738 shm_params.key = key;
739 shm_params.flg = shmflg;
740 shm_params.u.size = size;
741
742 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
743 }
744
745 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
746 {
747 return ksys_shmget(key, size, shmflg);
748 }
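/*
 * Illustrative userspace sequence (not part of this file) showing how the
 * syscalls implemented here are typically combined:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */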
749
750 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
751 {
752 switch (version) {
753 case IPC_64:
754 return copy_to_user(buf, in, sizeof(*in));
755 case IPC_OLD:
756 {
757 struct shmid_ds out;
758
759 memset(&out, 0, sizeof(out));
760 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
761 out.shm_segsz = in->shm_segsz;
762 out.shm_atime = in->shm_atime;
763 out.shm_dtime = in->shm_dtime;
764 out.shm_ctime = in->shm_ctime;
765 out.shm_cpid = in->shm_cpid;
766 out.shm_lpid = in->shm_lpid;
767 out.shm_nattch = in->shm_nattch;
768
769 return copy_to_user(buf, &out, sizeof(out));
770 }
771 default:
772 return -EINVAL;
773 }
774 }
775
776 static inline unsigned long
777 copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
778 {
779 switch (version) {
780 case IPC_64:
781 if (copy_from_user(out, buf, sizeof(*out)))
782 return -EFAULT;
783 return 0;
784 case IPC_OLD:
785 {
786 struct shmid_ds tbuf_old;
787
788 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
789 return -EFAULT;
790
791 out->shm_perm.uid = tbuf_old.shm_perm.uid;
792 out->shm_perm.gid = tbuf_old.shm_perm.gid;
793 out->shm_perm.mode = tbuf_old.shm_perm.mode;
794
795 return 0;
796 }
797 default:
798 return -EINVAL;
799 }
800 }
801
802 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
803 {
804 switch (version) {
805 case IPC_64:
806 return copy_to_user(buf, in, sizeof(*in));
807 case IPC_OLD:
808 {
809 struct shminfo out;
810
811 if (in->shmmax > INT_MAX)
812 out.shmmax = INT_MAX;
813 else
814 out.shmmax = (int)in->shmmax;
815
816 out.shmmin = in->shmmin;
817 out.shmmni = in->shmmni;
818 out.shmseg = in->shmseg;
819 out.shmall = in->shmall;
820
821 return copy_to_user(buf, &out, sizeof(out));
822 }
823 default:
824 return -EINVAL;
825 }
826 }
827
828 /*
829 * Calculate and add used RSS and swap pages of a shm.
830 * Called with shm_ids.rwsem held as a reader
831 */
832 static void shm_add_rss_swap(struct shmid_kernel *shp,
833 unsigned long *rss_add, unsigned long *swp_add)
834 {
835 struct inode *inode;
836
837 inode = file_inode(shp->shm_file);
838
839 if (is_file_hugepages(shp->shm_file)) {
840 struct address_space *mapping = inode->i_mapping;
841 struct hstate *h = hstate_file(shp->shm_file);
842 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
843 } else {
844 #ifdef CONFIG_SHMEM
845 struct shmem_inode_info *info = SHMEM_I(inode);
846
847 spin_lock_irq(&info->lock);
848 *rss_add += inode->i_mapping->nrpages;
849 *swp_add += info->swapped;
850 spin_unlock_irq(&info->lock);
851 #else
852 *rss_add += inode->i_mapping->nrpages;
853 #endif
854 }
855 }
856
857 /*
858 * Called with shm_ids.rwsem held as a reader
859 */
860 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
861 unsigned long *swp)
862 {
863 int next_id;
864 int total, in_use;
865
866 *rss = 0;
867 *swp = 0;
868
869 in_use = shm_ids(ns).in_use;
870
871 for (total = 0, next_id = 0; total < in_use; next_id++) {
872 struct kern_ipc_perm *ipc;
873 struct shmid_kernel *shp;
874
875 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
876 if (ipc == NULL)
877 continue;
878 shp = container_of(ipc, struct shmid_kernel, shm_perm);
879
880 shm_add_rss_swap(shp, rss, swp);
881
882 total++;
883 }
884 }
885
886 /*
887 * This function handles some shmctl commands which require the rwsem
888 * to be held in write mode.
889 * NOTE: no locks must be held, the rwsem is taken inside this function.
890 */
891 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
892 struct shmid64_ds *shmid64)
893 {
894 struct kern_ipc_perm *ipcp;
895 struct shmid_kernel *shp;
896 int err;
897
898 down_write(&shm_ids(ns).rwsem);
899 rcu_read_lock();
900
901 ipcp = ipcctl_obtain_check(ns, &shm_ids(ns), shmid, cmd,
902 &shmid64->shm_perm, 0);
903 if (IS_ERR(ipcp)) {
904 err = PTR_ERR(ipcp);
905 goto out_unlock1;
906 }
907
908 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
909
910 err = security_shm_shmctl(&shp->shm_perm, cmd);
911 if (err)
912 goto out_unlock1;
913
914 switch (cmd) {
915 case IPC_RMID:
916 ipc_lock_object(&shp->shm_perm);
917 /* do_shm_rmid unlocks the ipc object and rcu */
918 do_shm_rmid(ns, ipcp);
919 goto out_up;
920 case IPC_SET:
921 ipc_lock_object(&shp->shm_perm);
922 err = ipc_update_perm(&shmid64->shm_perm, ipcp);
923 if (err)
924 goto out_unlock0;
925 shp->shm_ctim = ktime_get_real_seconds();
926 break;
927 default:
928 err = -EINVAL;
929 goto out_unlock1;
930 }
931
932 out_unlock0:
933 ipc_unlock_object(&shp->shm_perm);
934 out_unlock1:
935 rcu_read_unlock();
936 out_up:
937 up_write(&shm_ids(ns).rwsem);
938 return err;
939 }
940
941 static int shmctl_ipc_info(struct ipc_namespace *ns,
942 struct shminfo64 *shminfo)
943 {
944 int err = security_shm_shmctl(NULL, IPC_INFO);
945 if (!err) {
946 memset(shminfo, 0, sizeof(*shminfo));
947 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
948 shminfo->shmmax = ns->shm_ctlmax;
949 shminfo->shmall = ns->shm_ctlall;
950 shminfo->shmmin = SHMMIN;
951 down_read(&shm_ids(ns).rwsem);
952 err = ipc_get_maxidx(&shm_ids(ns));
953 up_read(&shm_ids(ns).rwsem);
954 if (err < 0)
955 err = 0;
956 }
957 return err;
958 }
959
960 static int shmctl_shm_info(struct ipc_namespace *ns,
961 struct shm_info *shm_info)
962 {
963 int err = security_shm_shmctl(NULL, SHM_INFO);
964 if (!err) {
965 memset(shm_info, 0, sizeof(*shm_info));
966 down_read(&shm_ids(ns).rwsem);
967 shm_info->used_ids = shm_ids(ns).in_use;
968 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
969 shm_info->shm_tot = ns->shm_tot;
970 shm_info->swap_attempts = 0;
971 shm_info->swap_successes = 0;
972 err = ipc_get_maxidx(&shm_ids(ns));
973 up_read(&shm_ids(ns).rwsem);
974 if (err < 0)
975 err = 0;
976 }
977 return err;
978 }
979
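/*
 * IPC_STAT, SHM_STAT and SHM_STAT_ANY: fill *tbuf from the segment.
 * Returns 0 for IPC_STAT, or the full ipc id (including the sequence
 * number) for SHM_STAT and SHM_STAT_ANY.
 */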
980 static int shmctl_stat(struct ipc_namespace *ns, int shmid,
981 int cmd, struct shmid64_ds *tbuf)
982 {
983 struct shmid_kernel *shp;
984 int err;
985
986 memset(tbuf, 0, sizeof(*tbuf));
987
988 rcu_read_lock();
989 if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
990 shp = shm_obtain_object(ns, shmid);
991 if (IS_ERR(shp)) {
992 err = PTR_ERR(shp);
993 goto out_unlock;
994 }
995 } else { /* IPC_STAT */
996 shp = shm_obtain_object_check(ns, shmid);
997 if (IS_ERR(shp)) {
998 err = PTR_ERR(shp);
999 goto out_unlock;
1000 }
1001 }
1002
1003 /*
1004 * Semantically SHM_STAT_ANY ought to be identical to
1005 * that functionality provided by the /proc/sysvipc/
1006 * interface. As such, only audit these calls and
1007 * do not do traditional S_IRUGO permission checks on
1008 * the ipc object.
1009 */
1010 if (cmd == SHM_STAT_ANY)
1011 audit_ipc_obj(&shp->shm_perm);
1012 else {
1013 err = -EACCES;
1014 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1015 goto out_unlock;
1016 }
1017
1018 err = security_shm_shmctl(&shp->shm_perm, cmd);
1019 if (err)
1020 goto out_unlock;
1021
1022 ipc_lock_object(&shp->shm_perm);
1023
1024 if (!ipc_valid_object(&shp->shm_perm)) {
1025 ipc_unlock_object(&shp->shm_perm);
1026 err = -EIDRM;
1027 goto out_unlock;
1028 }
1029
1030 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1031 tbuf->shm_segsz = shp->shm_segsz;
1032 tbuf->shm_atime = shp->shm_atim;
1033 tbuf->shm_dtime = shp->shm_dtim;
1034 tbuf->shm_ctime = shp->shm_ctim;
1035 #ifndef CONFIG_64BIT
1036 tbuf->shm_atime_high = shp->shm_atim >> 32;
1037 tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1038 tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1039 #endif
1040 tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
1041 tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
1042 tbuf->shm_nattch = shp->shm_nattch;
1043
1044 if (cmd == IPC_STAT) {
1045 /*
1046 * As defined in SUS:
1047 * Return 0 on success
1048 */
1049 err = 0;
1050 } else {
1051 /*
1052 * SHM_STAT and SHM_STAT_ANY (both Linux specific)
1053 * Return the full id, including the sequence number
1054 */
1055 err = shp->shm_perm.id;
1056 }
1057
1058 ipc_unlock_object(&shp->shm_perm);
1059 out_unlock:
1060 rcu_read_unlock();
1061 return err;
1062 }
1063
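/*
 * SHM_LOCK/SHM_UNLOCK: mark the backing shmem file locked or unlocked so
 * its pages are exempted from (or again subject to) swap-out.
 * Hugetlb-backed segments are skipped, as they are never swapped.
 */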
1064 static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
1065 {
1066 struct shmid_kernel *shp;
1067 struct file *shm_file;
1068 int err;
1069
1070 rcu_read_lock();
1071 shp = shm_obtain_object_check(ns, shmid);
1072 if (IS_ERR(shp)) {
1073 err = PTR_ERR(shp);
1074 goto out_unlock1;
1075 }
1076
1077 audit_ipc_obj(&(shp->shm_perm));
1078 err = security_shm_shmctl(&shp->shm_perm, cmd);
1079 if (err)
1080 goto out_unlock1;
1081
1082 ipc_lock_object(&shp->shm_perm);
1083
1084 /* check if shm_destroy() is tearing down shp */
1085 if (!ipc_valid_object(&shp->shm_perm)) {
1086 err = -EIDRM;
1087 goto out_unlock0;
1088 }
1089
1090 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1091 kuid_t euid = current_euid();
1092
1093 if (!uid_eq(euid, shp->shm_perm.uid) &&
1094 !uid_eq(euid, shp->shm_perm.cuid)) {
1095 err = -EPERM;
1096 goto out_unlock0;
1097 }
1098 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1099 err = -EPERM;
1100 goto out_unlock0;
1101 }
1102 }
1103
1104 shm_file = shp->shm_file;
1105 if (is_file_hugepages(shm_file))
1106 goto out_unlock0;
1107
1108 if (cmd == SHM_LOCK) {
1109 struct user_struct *user = current_user();
1110
1111 err = shmem_lock(shm_file, 1, user);
1112 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1113 shp->shm_perm.mode |= SHM_LOCKED;
1114 shp->mlock_user = user;
1115 }
1116 goto out_unlock0;
1117 }
1118
1119 /* SHM_UNLOCK */
1120 if (!(shp->shm_perm.mode & SHM_LOCKED))
1121 goto out_unlock0;
1122 shmem_lock(shm_file, 0, shp->mlock_user);
1123 shp->shm_perm.mode &= ~SHM_LOCKED;
1124 shp->mlock_user = NULL;
1125 get_file(shm_file);
1126 ipc_unlock_object(&shp->shm_perm);
1127 rcu_read_unlock();
1128 shmem_unlock_mapping(shm_file->f_mapping);
1129
1130 fput(shm_file);
1131 return err;
1132
1133 out_unlock0:
1134 ipc_unlock_object(&shp->shm_perm);
1135 out_unlock1:
1136 rcu_read_unlock();
1137 return err;
1138 }
1139
1140 long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
1141 {
1142 int err, version;
1143 struct ipc_namespace *ns;
1144 struct shmid64_ds sem64;
1145
1146 if (cmd < 0 || shmid < 0)
1147 return -EINVAL;
1148
1149 version = ipc_parse_version(&cmd);
1150 ns = current->nsproxy->ipc_ns;
1151
1152 switch (cmd) {
1153 case IPC_INFO: {
1154 struct shminfo64 shminfo;
1155 err = shmctl_ipc_info(ns, &shminfo);
1156 if (err < 0)
1157 return err;
1158 if (copy_shminfo_to_user(buf, &shminfo, version))
1159 err = -EFAULT;
1160 return err;
1161 }
1162 case SHM_INFO: {
1163 struct shm_info shm_info;
1164 err = shmctl_shm_info(ns, &shm_info);
1165 if (err < 0)
1166 return err;
1167 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1168 err = -EFAULT;
1169 return err;
1170 }
1171 case SHM_STAT:
1172 case SHM_STAT_ANY:
1173 case IPC_STAT: {
1174 err = shmctl_stat(ns, shmid, cmd, &sem64);
1175 if (err < 0)
1176 return err;
1177 if (copy_shmid_to_user(buf, &sem64, version))
1178 err = -EFAULT;
1179 return err;
1180 }
1181 case IPC_SET:
1182 if (copy_shmid_from_user(&sem64, buf, version))
1183 return -EFAULT;
1184 /* fallthru */
1185 case IPC_RMID:
1186 return shmctl_down(ns, shmid, cmd, &sem64);
1187 case SHM_LOCK:
1188 case SHM_UNLOCK:
1189 return shmctl_do_lock(ns, shmid, cmd);
1190 default:
1191 return -EINVAL;
1192 }
1193 }
1194
1195 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1196 {
1197 return ksys_shmctl(shmid, cmd, buf);
1198 }
1199
1200 #ifdef CONFIG_COMPAT
1201
1202 struct compat_shmid_ds {
1203 struct compat_ipc_perm shm_perm;
1204 int shm_segsz;
1205 compat_time_t shm_atime;
1206 compat_time_t shm_dtime;
1207 compat_time_t shm_ctime;
1208 compat_ipc_pid_t shm_cpid;
1209 compat_ipc_pid_t shm_lpid;
1210 unsigned short shm_nattch;
1211 unsigned short shm_unused;
1212 compat_uptr_t shm_unused2;
1213 compat_uptr_t shm_unused3;
1214 };
1215
1216 struct compat_shminfo64 {
1217 compat_ulong_t shmmax;
1218 compat_ulong_t shmmin;
1219 compat_ulong_t shmmni;
1220 compat_ulong_t shmseg;
1221 compat_ulong_t shmall;
1222 compat_ulong_t __unused1;
1223 compat_ulong_t __unused2;
1224 compat_ulong_t __unused3;
1225 compat_ulong_t __unused4;
1226 };
1227
1228 struct compat_shm_info {
1229 compat_int_t used_ids;
1230 compat_ulong_t shm_tot, shm_rss, shm_swp;
1231 compat_ulong_t swap_attempts, swap_successes;
1232 };
1233
1234 static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1235 int version)
1236 {
1237 if (in->shmmax > INT_MAX)
1238 in->shmmax = INT_MAX;
1239 if (version == IPC_64) {
1240 struct compat_shminfo64 info;
1241 memset(&info, 0, sizeof(info));
1242 info.shmmax = in->shmmax;
1243 info.shmmin = in->shmmin;
1244 info.shmmni = in->shmmni;
1245 info.shmseg = in->shmseg;
1246 info.shmall = in->shmall;
1247 return copy_to_user(buf, &info, sizeof(info));
1248 } else {
1249 struct shminfo info;
1250 memset(&info, 0, sizeof(info));
1251 info.shmmax = in->shmmax;
1252 info.shmmin = in->shmmin;
1253 info.shmmni = in->shmmni;
1254 info.shmseg = in->shmseg;
1255 info.shmall = in->shmall;
1256 return copy_to_user(buf, &info, sizeof(info));
1257 }
1258 }
1259
1260 static int put_compat_shm_info(struct shm_info *ip,
1261 struct compat_shm_info __user *uip)
1262 {
1263 struct compat_shm_info info;
1264
1265 memset(&info, 0, sizeof(info));
1266 info.used_ids = ip->used_ids;
1267 info.shm_tot = ip->shm_tot;
1268 info.shm_rss = ip->shm_rss;
1269 info.shm_swp = ip->shm_swp;
1270 info.swap_attempts = ip->swap_attempts;
1271 info.swap_successes = ip->swap_successes;
1272 return copy_to_user(uip, &info, sizeof(info));
1273 }
1274
1275 static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1276 int version)
1277 {
1278 if (version == IPC_64) {
1279 struct compat_shmid64_ds v;
1280 memset(&v, 0, sizeof(v));
1281 to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1282 v.shm_atime = lower_32_bits(in->shm_atime);
1283 v.shm_atime_high = upper_32_bits(in->shm_atime);
1284 v.shm_dtime = lower_32_bits(in->shm_dtime);
1285 v.shm_dtime_high = upper_32_bits(in->shm_dtime);
1286 v.shm_ctime = lower_32_bits(in->shm_ctime);
1287 v.shm_ctime_high = upper_32_bits(in->shm_ctime);
1288 v.shm_segsz = in->shm_segsz;
1289 v.shm_nattch = in->shm_nattch;
1290 v.shm_cpid = in->shm_cpid;
1291 v.shm_lpid = in->shm_lpid;
1292 return copy_to_user(buf, &v, sizeof(v));
1293 } else {
1294 struct compat_shmid_ds v;
1295 memset(&v, 0, sizeof(v));
1296 to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1297 v.shm_perm.key = in->shm_perm.key;
1298 v.shm_atime = in->shm_atime;
1299 v.shm_dtime = in->shm_dtime;
1300 v.shm_ctime = in->shm_ctime;
1301 v.shm_segsz = in->shm_segsz;
1302 v.shm_nattch = in->shm_nattch;
1303 v.shm_cpid = in->shm_cpid;
1304 v.shm_lpid = in->shm_lpid;
1305 return copy_to_user(buf, &v, sizeof(v));
1306 }
1307 }
1308
1309 static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1310 int version)
1311 {
1312 memset(out, 0, sizeof(*out));
1313 if (version == IPC_64) {
1314 struct compat_shmid64_ds __user *p = buf;
1315 return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1316 } else {
1317 struct compat_shmid_ds __user *p = buf;
1318 return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1319 }
1320 }
1321
1322 long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr)
1323 {
1324 struct ipc_namespace *ns;
1325 struct shmid64_ds sem64;
1326 int version = compat_ipc_parse_version(&cmd);
1327 int err;
1328
1329 ns = current->nsproxy->ipc_ns;
1330
1331 if (cmd < 0 || shmid < 0)
1332 return -EINVAL;
1333
1334 switch (cmd) {
1335 case IPC_INFO: {
1336 struct shminfo64 shminfo;
1337 err = shmctl_ipc_info(ns, &shminfo);
1338 if (err < 0)
1339 return err;
1340 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1341 err = -EFAULT;
1342 return err;
1343 }
1344 case SHM_INFO: {
1345 struct shm_info shm_info;
1346 err = shmctl_shm_info(ns, &shm_info);
1347 if (err < 0)
1348 return err;
1349 if (put_compat_shm_info(&shm_info, uptr))
1350 err = -EFAULT;
1351 return err;
1352 }
1353 case IPC_STAT:
1354 case SHM_STAT_ANY:
1355 case SHM_STAT:
1356 err = shmctl_stat(ns, shmid, cmd, &sem64);
1357 if (err < 0)
1358 return err;
1359 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1360 err = -EFAULT;
1361 return err;
1362
1363 case IPC_SET:
1364 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1365 return -EFAULT;
1366 /* fallthru */
1367 case IPC_RMID:
1368 return shmctl_down(ns, shmid, cmd, &sem64);
1369 case SHM_LOCK:
1370 case SHM_UNLOCK:
1371 return shmctl_do_lock(ns, shmid, cmd);
1372 break;
1373 default:
1374 return -EINVAL;
1375 }
1376 return err;
1377 }
1378
1379 COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1380 {
1381 return compat_ksys_shmctl(shmid, cmd, uptr);
1382 }
1383 #endif
1384
1385 /*
1386 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1387 *
1388 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1389 * "raddr" thing points to kernel space, and there has to be a wrapper around
1390 * this.
1391 */
1392 long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1393 ulong *raddr, unsigned long shmlba)
1394 {
1395 struct shmid_kernel *shp;
1396 unsigned long addr = (unsigned long)shmaddr;
1397 unsigned long size;
1398 struct file *file, *base;
1399 int err;
1400 unsigned long flags = MAP_SHARED;
1401 unsigned long prot;
1402 int acc_mode;
1403 struct ipc_namespace *ns;
1404 struct shm_file_data *sfd;
1405 int f_flags;
1406 unsigned long populate = 0;
1407
1408 err = -EINVAL;
1409 if (shmid < 0)
1410 goto out;
1411
1412 if (addr) {
1413 if (addr & (shmlba - 1)) {
1414 if (shmflg & SHM_RND) {
1415 addr &= ~(shmlba - 1); /* round down */
1416
1417 /*
1418 * Ensure that the rounded-down address is non-zero
1419 * when remapping. This can happen for
1420 * cases when addr < shmlba.
1421 */
1422 if (!addr && (shmflg & SHM_REMAP))
1423 goto out;
1424 } else
1425 #ifndef __ARCH_FORCE_SHMLBA
1426 if (addr & ~PAGE_MASK)
1427 #endif
1428 goto out;
1429 }
1430
1431 flags |= MAP_FIXED;
1432 } else if ((shmflg & SHM_REMAP))
1433 goto out;
1434
1435 if (shmflg & SHM_RDONLY) {
1436 prot = PROT_READ;
1437 acc_mode = S_IRUGO;
1438 f_flags = O_RDONLY;
1439 } else {
1440 prot = PROT_READ | PROT_WRITE;
1441 acc_mode = S_IRUGO | S_IWUGO;
1442 f_flags = O_RDWR;
1443 }
1444 if (shmflg & SHM_EXEC) {
1445 prot |= PROT_EXEC;
1446 acc_mode |= S_IXUGO;
1447 }
1448
1449 /*
1450 * We cannot rely on the fs check since SYSV IPC does have an
1451 * additional creator id...
1452 */
1453 ns = current->nsproxy->ipc_ns;
1454 rcu_read_lock();
1455 shp = shm_obtain_object_check(ns, shmid);
1456 if (IS_ERR(shp)) {
1457 err = PTR_ERR(shp);
1458 goto out_unlock;
1459 }
1460
1461 err = -EACCES;
1462 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1463 goto out_unlock;
1464
1465 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1466 if (err)
1467 goto out_unlock;
1468
1469 ipc_lock_object(&shp->shm_perm);
1470
1471 /* check if shm_destroy() is tearing down shp */
1472 if (!ipc_valid_object(&shp->shm_perm)) {
1473 ipc_unlock_object(&shp->shm_perm);
1474 err = -EIDRM;
1475 goto out_unlock;
1476 }
1477
1478 /*
1479 * We need to take a reference to the real shm file to prevent the
1480 * pointer from becoming stale in cases where the lifetime of the outer
1481 * file extends beyond that of the shm segment. It's not usually
1482 * possible, but it can happen during remap_file_pages() emulation as
1483 * that unmaps the memory, then does ->mmap() via file reference only.
1484 * We'll deny the ->mmap() if the shm segment was since removed, but to
1485 * detect shm ID reuse we need to compare the file pointers.
1486 */
1487 base = get_file(shp->shm_file);
1488 shp->shm_nattch++;
1489 size = i_size_read(file_inode(base));
1490 ipc_unlock_object(&shp->shm_perm);
1491 rcu_read_unlock();
1492
1493 err = -ENOMEM;
1494 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1495 if (!sfd) {
1496 fput(base);
1497 goto out_nattch;
1498 }
1499
1500 file = alloc_file_clone(base, f_flags,
1501 is_file_hugepages(base) ?
1502 &shm_file_operations_huge :
1503 &shm_file_operations);
1504 err = PTR_ERR(file);
1505 if (IS_ERR(file)) {
1506 kfree(sfd);
1507 fput(base);
1508 goto out_nattch;
1509 }
1510
1511 sfd->id = shp->shm_perm.id;
1512 sfd->ns = get_ipc_ns(ns);
1513 sfd->file = base;
1514 sfd->vm_ops = NULL;
1515 file->private_data = sfd;
1516
1517 err = security_mmap_file(file, prot, flags);
1518 if (err)
1519 goto out_fput;
1520
1521 if (down_write_killable(&current->mm->mmap_sem)) {
1522 err = -EINTR;
1523 goto out_fput;
1524 }
1525
1526 if (addr && !(shmflg & SHM_REMAP)) {
1527 err = -EINVAL;
1528 if (addr + size < addr)
1529 goto invalid;
1530
1531 if (find_vma_intersection(current->mm, addr, addr + size))
1532 goto invalid;
1533 }
1534
1535 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
1536 *raddr = addr;
1537 err = 0;
1538 if (IS_ERR_VALUE(addr))
1539 err = (long)addr;
1540 invalid:
1541 up_write(&current->mm->mmap_sem);
1542 if (populate)
1543 mm_populate(addr, populate);
1544
1545 out_fput:
1546 fput(file);
1547
1548 out_nattch:
1549 down_write(&shm_ids(ns).rwsem);
1550 shp = shm_lock(ns, shmid);
1551 shp->shm_nattch--;
1552 if (shm_may_destroy(ns, shp))
1553 shm_destroy(ns, shp);
1554 else
1555 shm_unlock(shp);
1556 up_write(&shm_ids(ns).rwsem);
1557 return err;
1558
1559 out_unlock:
1560 rcu_read_unlock();
1561 out:
1562 return err;
1563 }
1564
1565 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1566 {
1567 unsigned long ret;
1568 long err;
1569
1570 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1571 if (err)
1572 return err;
1573 force_successful_syscall_return();
1574 return (long)ret;
1575 }
1576
1577 #ifdef CONFIG_COMPAT
1578
1579 #ifndef COMPAT_SHMLBA
1580 #define COMPAT_SHMLBA SHMLBA
1581 #endif
1582
1583 COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1584 {
1585 unsigned long ret;
1586 long err;
1587
1588 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1589 if (err)
1590 return err;
1591 force_successful_syscall_return();
1592 return (long)ret;
1593 }
1594 #endif
1595
1596 /*
1597 * detach and kill segment if marked destroyed.
1598 * The work is done in shm_close.
1599 */
1600 long ksys_shmdt(char __user *shmaddr)
1601 {
1602 struct mm_struct *mm = current->mm;
1603 struct vm_area_struct *vma;
1604 unsigned long addr = (unsigned long)shmaddr;
1605 int retval = -EINVAL;
1606 #ifdef CONFIG_MMU
1607 loff_t size = 0;
1608 struct file *file;
1609 struct vm_area_struct *next;
1610 #endif
1611
1612 if (addr & ~PAGE_MASK)
1613 return retval;
1614
1615 if (down_write_killable(&mm->mmap_sem))
1616 return -EINTR;
1617
1618 /*
1619 * This function tries to be smart and unmap shm segments that
1620 * were modified by partial mlock or munmap calls:
1621 * - It first determines the size of the shm segment that should be
1622 * unmapped: It searches for a vma that is backed by shm and that
1623 * started at address shmaddr. It records its size and then unmaps
1624 * it.
1625 * - Then it unmaps all shm vmas that started at shmaddr and that
1626 * are within the initially determined size and that are from the
1627 * same shm segment from which we determined the size.
1628 * Errors from do_munmap are ignored: the function only fails if
1629 * it's called with invalid parameters or if it's called to unmap
1630 * a part of a vma. Both calls in this function are for full vmas,
1631 * the parameters are directly copied from the vma itself and always
1632 * valid - therefore do_munmap cannot fail. (famous last words?)
1633 */
1634 /*
1635 * If it had been mremap()'d, the starting address would not
1636 * match the usual checks anyway. So assume all vma's are
1637 * above the starting address given.
1638 */
1639 vma = find_vma(mm, addr);
1640
1641 #ifdef CONFIG_MMU
1642 while (vma) {
1643 next = vma->vm_next;
1644
1645 /*
1646 * Check if the starting address would match, i.e. it's
1647 * a fragment created by mprotect() and/or munmap(), or
1648 * otherwise it starts at this address with no hassles.
1649 */
1650 if ((vma->vm_ops == &shm_vm_ops) &&
1651 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1652
1653 /*
1654 * Record the file of the shm segment being
1655 * unmapped. With mremap(), someone could place
1656 * a page from another segment but with equal offsets
1657 * in the range we are unmapping.
1658 */
1659 file = vma->vm_file;
1660 size = i_size_read(file_inode(vma->vm_file));
1661 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1662 /*
1663 * We discovered the size of the shm segment, so
1664 * break out of here and fall through to the next
1665 * loop that uses the size information to stop
1666 * searching for matching vma's.
1667 */
1668 retval = 0;
1669 vma = next;
1670 break;
1671 }
1672 vma = next;
1673 }
1674
1675 /*
1676 * We need look no further than the maximum address a fragment
1677 * could possibly have landed at. Also cast things to loff_t to
1678 * prevent overflows and make comparisons vs. equal-width types.
1679 */
1680 size = PAGE_ALIGN(size);
1681 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1682 next = vma->vm_next;
1683
1684 /* finding a matching vma now does not alter retval */
1685 if ((vma->vm_ops == &shm_vm_ops) &&
1686 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1687 (vma->vm_file == file))
1688 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1689 vma = next;
1690 }
1691
1692 #else /* CONFIG_MMU */
1693 /* under NOMMU conditions, the exact address to be destroyed must be
1694 * given
1695 */
1696 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1697 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1698 retval = 0;
1699 }
1700
1701 #endif
1702
1703 up_write(&mm->mmap_sem);
1704 return retval;
1705 }
1706
1707 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1708 {
1709 return ksys_shmdt(shmaddr);
1710 }
1711
1712 #ifdef CONFIG_PROC_FS
1713 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1714 {
1715 struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
1716 struct user_namespace *user_ns = seq_user_ns(s);
1717 struct kern_ipc_perm *ipcp = it;
1718 struct shmid_kernel *shp;
1719 unsigned long rss = 0, swp = 0;
1720
1721 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1722 shm_add_rss_swap(shp, &rss, &swp);
1723
1724 #if BITS_PER_LONG <= 32
1725 #define SIZE_SPEC "%10lu"
1726 #else
1727 #define SIZE_SPEC "%21lu"
1728 #endif
1729
1730 seq_printf(s,
1731 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1732 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1733 SIZE_SPEC " " SIZE_SPEC "\n",
1734 shp->shm_perm.key,
1735 shp->shm_perm.id,
1736 shp->shm_perm.mode,
1737 shp->shm_segsz,
1738 pid_nr_ns(shp->shm_cprid, pid_ns),
1739 pid_nr_ns(shp->shm_lprid, pid_ns),
1740 shp->shm_nattch,
1741 from_kuid_munged(user_ns, shp->shm_perm.uid),
1742 from_kgid_munged(user_ns, shp->shm_perm.gid),
1743 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1744 from_kgid_munged(user_ns, shp->shm_perm.cgid),
1745 shp->shm_atim,
1746 shp->shm_dtim,
1747 shp->shm_ctim,
1748 rss * PAGE_SIZE,
1749 swp * PAGE_SIZE);
1750
1751 return 0;
1752 }
1753 #endif
1754