// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/shm.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
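/*
 * COLOUR_ALIGN(addr, pgoff) rounds addr up to the next SHMLBA boundary and
 * then adds the cache colour of the file offset, so that page 0 of the
 * object lands on a SHMLBA boundary.  Worked example, for illustration
 * only, assuming SHMLBA were four 4 KiB pages (0x4000):
 *
 *	COLOUR_ALIGN(0x10123000, 5)
 *		= ((0x10123000 + 0x3fff) & ~0x3fff)	->   0x10124000
 *		  + ((5 << 12) & 0x3fff)		-> +     0x1000
 *		= 0x10125000
 *
 * 0x10125000 - (5 << 12) = 0x10120000, a multiple of SHMLBA, so the first
 * page of the object would map at a SHMLBA-aligned address.
 */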

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	struct vm_unmapped_area_info info;
	int aliasing = 0;

	if (IS_ENABLED(CONFIG_CPU_CACHE_ALIASING))
		aliasing = 1;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
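	 *
	 * File-backed mappings are coloured as well, not just MAP_SHARED
	 * ones, so that every user mapping of a given file page ends up
	 * on the same cache colour.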
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
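	 *
	 * A fixed address cannot be moved, so a MAP_SHARED request whose
	 * colour does not match the file offset is rejected with -EINVAL
	 * rather than being realigned.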
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

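	/*
	 * If the caller supplied a hint, adjust it to the required colour
	 * (or just page-align it) and use it as long as the range fits
	 * below TASK_SIZE and does not overlap an existing mapping.
	 */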
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

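	/*
	 * Otherwise let vm_unmapped_area() choose: align_mask keeps only
	 * the colour bits above PAGE_SHIFT and align_offset carries the
	 * file offset, so with do_align set the returned address is
	 * congruent to pgoff << PAGE_SHIFT modulo SHMLBA.
	 */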
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}