// SPDX-License-Identifier: GPL-2.0
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:		The optional name in /proc/pid/maps
 * @unpinned_list:	The list of this area's unpinned page ranges
 * @file:		The shmem-based backing file
 * @size:		The size of the mapping, in bytes
 * @prot_mask:		The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};
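
/*
 * Illustrative only: for a region named "foo", 'name' holds
 * "dev/ashmem/foo". The prefix is copied in at open() and the
 * user-supplied suffix at ASHMEM_SET_NAME, so /proc/pid/maps shows the
 * prefixed name once the backing file is created.
 */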

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:	The entry in the LRU list
 * @unpinned:	The entry in its area's unpinned list
 * @asma:	The associated anonymous shared memory area
 * @pgstart:	The starting page (inclusive)
 * @pgend:	The ending page (inclusive)
 * @purged:	The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};
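
/*
 * Illustrative only: unpinning pages 0-3 and 8-9 of a region leaves two
 * ashmem_range entries on asma->unpinned_list; pages 4-7 stay pinned
 * and are never placed on the LRU.
 */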

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

static inline unsigned long range_size(struct ashmem_range *range)
{
	return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
	return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
					     size_t start, size_t end)
{
	return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
						size_t start, size_t end)
{
	return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
	return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
				       size_t start, size_t end)
{
	return page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range, size_t page)
{
	return range->pgend < page;
}
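
/*
 * Illustrative only: for a range covering pages [3, 7], page_in_range()
 * is true for pages 3..7, page_range_in_range() is true for any
 * interval touching [3, 7] (e.g. [6, 9]), and
 * page_range_subsumes_range() is true only for intervals containing it
 * entirely (e.g. [2, 8]).
 */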

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:	The memory range being added
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:	The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}
/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:	The associated ashmem_area
 * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
 * @purged:	Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:	The starting page (inclusive)
 * @end:	The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:	The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:	The associated ashmem_range being shrunk
 * @start:	The starting page (inclusive) of the new range
 * @end:	The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way; it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
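
/*
 * Illustrative only: shrinking an unpinned range [2, 9] (8 pages) to
 * [2, 5] (4 pages) decrements lru_count by the 4 pages that left the
 * range; the backing pages themselves are untouched.
 */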

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:	The backing file's inode
 * @file:	The backing file
 *
 * Please note that the ashmem_area is not returned by this function; it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (!asma)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:	The backing file's inode - it is ignored here
 * @file:	The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct ashmem_area *asma = iocb->ki_filp->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	mutex_unlock(&ashmem_mutex);
	ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
	mutex_lock(&ashmem_mutex);
	if (ret > 0)
		asma->file->f_pos = iocb->ki_pos;
out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	loff_t ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		mutex_unlock(&ashmem_mutex);
		return -EINVAL;
	}

	if (!asma->file) {
		mutex_unlock(&ashmem_mutex);
		return -EBADF;
	}

	mutex_unlock(&ashmem_mutex);

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		return ret;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (!asma->size) {
		ret = -EINVAL;
		goto out;
	}

	/* requested mapping size larger than object size */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
	    calc_vm_prot_bits(PROT_MASK, 0)) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		vmfile->f_mode |= FMODE_LSEEK;
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	} else {
		vma_set_anonymous(vma);
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		range->asma->file->f_op->fallocate(range->asma->file,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}
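
/*
 * Illustrative only, assuming 4 KiB pages: purging an unpinned range
 * [2, 4] punches a hole at byte offset 8192 of length 12288 in the
 * backing shmem file, releasing three pages while keeping the file
 * size intact (FALLOC_FL_KEEP_SIZE).
 */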

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is a count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return
	 * the number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * such significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if ((asma->prot_mask & prot) != prot) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
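
/*
 * Illustrative only: with the default mask of
 * PROT_READ | PROT_WRITE | PROT_EXEC, setting the mask to PROT_READ
 * succeeds and drops write/exec; a later attempt to widen it back to
 * PROT_READ | PROT_WRITE fails with -EINVAL, since bits can only be
 * removed.
 */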

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (asma->file)
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland.
	 * No lock held
	 */
	if (copy_to_user(name, local_name, len))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
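		/*
		 * Illustrative only: pinning pages [4, 5] out of an
		 * unpinned range [2, 8] is case #4; the range shrinks
		 * to [2, 3] and a new range [6, 8] is allocated for
		 * the remainder.
		 */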
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
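
/*
 * Illustrative only: unpinning pages [4, 6] when [2, 4] and [6, 8] are
 * already unpinned deletes both overlapping ranges and replaces them
 * with a single coalesced range [2, 8].
 */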

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	mutex_lock(&ashmem_mutex);

	if (!asma->file)
		goto out_unlock;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if ((pin.offset | pin.len) & ~PAGE_MASK)
		goto out_unlock;

	if (((__u32)-1) - pin.offset < pin.len)
		goto out_unlock;

	if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
		goto out_unlock;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
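
	/*
	 * Illustrative only, assuming 4 KiB pages: pin.offset == 8192 and
	 * pin.len == 16384 yield pgstart == 2 and pgend == 5, an inclusive
	 * four-page interval.
	 */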

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

out_unlock:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		mutex_lock(&ashmem_mutex);
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		mutex_unlock(&ashmem_mutex);
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}
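
/*
 * Illustrative userspace flow (hypothetical example, not part of this
 * driver): create a named 64 KiB region and unpin its second half.
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "example-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 65536);
 *	void *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	struct ashmem_pin pin = { .offset = 32768, .len = 32768 };
 *	ioctl(fd, ASHMEM_UNPIN, &pin);
 *
 * Name and size must be set before the first mmap(); once the backing
 * file exists, ASHMEM_SET_NAME and ASHMEM_SET_SIZE fail with -EINVAL.
 */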

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct ashmem_area *asma = file->private_data;

	mutex_lock(&ashmem_mutex);

	if (asma->file)
		seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
		seq_printf(m, "name:\t%s\n",
			   asma->name + ASHMEM_NAME_PREFIX_LEN);

	mutex_unlock(&ashmem_mutex);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read_iter = ashmem_read_iter,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (!ashmem_area_cachep) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (!ashmem_range_cachep) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (ret) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	ret = register_shrinker(&ashmem_shrinker);
	if (ret) {
		pr_err("failed to register shrinker!\n");
		goto out_demisc;
	}

	pr_info("initialized\n");

	return 0;

out_demisc:
	misc_deregister(&ashmem_misc);
out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);