// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data data[CS_BASES];
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

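/*
 * Keep the userspace view of the vDSO base address in sync with the
 * kernel's when the mapping is moved with mremap().
 */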
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

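/*
 * Sanity-check the vDSO image and record the physical pages backing its
 * text so they can later be mapped into each process.
 */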
static int __init __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
		vdso_info[abi].vdso_code_end -
		vdso_info[abi].vdso_code_start) >>
		PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);

	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

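/*
 * Fault handler for the vvar mapping, one page per entry:
 *
 *   pgoff 0 (VVAR_DATA_PAGE_OFFSET):   vdso_data, or the task's
 *                                      namespace-specific page if it is
 *                                      in a non-root time namespace
 *   pgoff 1 (VVAR_TIMENS_PAGE_OFFSET): the real vdso_data page, mapped
 *                                      only for tasks in a time namespace
 */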
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

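/*
 * Install the vvar and vDSO text mappings for one ABI. The resulting
 * layout in the address space is:
 *
 *   vdso_base                               [vvar] (VVAR_NR_PAGES pages)
 *   vdso_base + VVAR_NR_PAGES * PAGE_SIZE   [vdso] text
 */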
static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data pages. */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name = "[vectors]", /* ABI */
		.pages = &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name = "[sigpage]", /* ABI */
		.pages = &aarch32_sig_page,
		.mremap = aarch32_sigpage_mremap,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

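/*
 * Copy the kuser helpers into a zeroed page so that they end at offset
 * 0x1000, matching their fixed addresses within the AArch32 vectors page.
 */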
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_KERNEL);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	return 0;
}

#define COMPAT_SIGPAGE_POISON_WORD	0xe7fddef1
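/*
 * Build the AArch32 signal return page: fill it with a poison word so
 * that stray jumps into the page fault, then copy the sigreturn
 * trampolines over the start of it.
 */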
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	__le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
	void *sigpage;

	sigpage = (void *)__get_free_page(GFP_KERNEL);
	if (!sigpage)
		return -ENOMEM;

	memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
	memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	return 0;
}

static int __init __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

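/*
 * Map the kuser helpers page at its fixed, ABI-mandated address.
 */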
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

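/*
 * Map the signal return trampoline page at a free address chosen by
 * get_unmapped_area().
 */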
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to copy-on-write the page
	 * when setting breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

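/*
 * Set up all of the AArch32 mappings (kuser helpers, compat vDSO and
 * sigpage) for a new compat process.
 */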
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
	},
	[AA64_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = vdso_mremap,
	},
};

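/*
 * Wire up the AArch64 special mappings and validate the vDSO image at
 * boot time.
 */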
static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

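/*
 * binfmt_elf hook: map the vDSO into the address space of a new AArch64
 * process.
 */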
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}