// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/elf-randomize.h>
#include <linux/security.h>
#include <linux/mman.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (TASK_SIZE/6*5)
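/* These bounds clamp the stack gap computed in mmap_base() below. */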

static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

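/*
 * Pick a random offset for the mmap base: up to mmap_rnd_bits
 * (mmap_rnd_compat_bits for 32-bit tasks) bits of entropy, returned
 * as a page-aligned byte offset.
 */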
unsigned long arch_mmap_rnd(void)
{
	unsigned long shift, rnd;

	shift = mmap_rnd_bits;
#ifdef CONFIG_COMPAT
	if (is_32bit_task())
		shift = mmap_rnd_compat_bits;
#endif
	rnd = get_random_long() % (1ul << shift);

	return rnd << PAGE_SHIFT;
}

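/*
 * Largest offset the stack top can be randomized by; used below to
 * pad the gap so a randomized stack cannot reach into the mmap area.
 */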
static inline unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		return (1<<23);
	else
		return (1<<30);
}

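/*
 * Compute the base of the top-down mmap area: leave room below the
 * stack for its rlimit, its maximum randomization and the guard gap,
 * clamp that gap to [MIN_GAP, MAX_GAP], then subtract the ASLR offset.
 */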
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
}

#ifdef CONFIG_PPC_RADIX_MMU
/*
 * Same as the generic arch_get_unmapped_area(), used only for radix.
 * We can't simply reuse the generic version: the hash MMU selects
 * HAVE_ARCH_UNMAPPED_AREA, so we have to carry a duplicate here.
 */
static unsigned long
radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
			     unsigned long len, unsigned long pgoff,
			     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

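	/*
	 * Addresses above DEFAULT_MAP_WINDOW are handed out only when
	 * the caller explicitly asks for them, via a hint above the
	 * window or a MAP_FIXED request crossing it.
	 */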
	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;

	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		return addr;
	}

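	/* Use the hint if, once page-aligned, it lies in a free, legal range. */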
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

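	/* No usable hint: run a bottom-up search from mmap_base. */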
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = high_limit;
	info.align_mask = 0;

	return vm_unmapped_area(&info);
}

static unsigned long
radix__arch_get_unmapped_area_topdown(struct file *filp,
				     const unsigned long addr0,
				     const unsigned long len,
				     const unsigned long pgoff,
				     const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;

	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		return addr;
	}

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

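	/*
	 * Search top-down from mmap_base. If the window was extended
	 * above DEFAULT_MAP_WINDOW, shift the upper limit up by the
	 * same amount.
	 */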
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
	info.align_mask = 0;

	addr = vm_unmapped_area(&info);
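	/* vm_unmapped_area() returns a page-aligned address on success. */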
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}

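/*
 * Select the radix-specific handlers for either the legacy (bottom-up)
 * or the default (top-down) layout.
 */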
static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
					unsigned long random_factor,
					struct rlimit *rlim_stack)
{
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = radix__arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
	}
}
#else
/* Dummy declaration: radix_enabled() is always false here, so this is never called. */
extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
					unsigned long random_factor,
					struct rlimit *rlim_stack);
#endif
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

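	/* The radix MMU has its own handlers, selected at runtime. */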
	if (radix_enabled())
		return radix__arch_pick_mmap_layout(mm, random_factor,
						    rlim_stack);
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}