/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>

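/*
 * Host VM operations (mmap/munmap/mprotect) are queued in a struct
 * host_vm_change and issued to the host by do_ops().  Consecutive
 * compatible operations are coalesced into the pending entry, so a run
 * of contiguous pages becomes a single host call.
 */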
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })

static void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}

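/*
 * Replay the queued operations against the host address space.  "end"
 * is the number of valid entries, "finished" tells the lower layers
 * whether this is the last batch of the current flush, and hvc->data is
 * opaque state threaded through map()/unmap()/protect().
 */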
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	if (ret == -ENOMEM)
		report_enomem();

	return ret;
}

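/*
 * Queue an mmap of the host file backing [virt, virt + len).  If the
 * new range extends the previously queued mmap (contiguous virtual
 * address and file offset, same prot and fd), the entries are merged;
 * otherwise a full queue is flushed before the new entry is added.
 */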
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
			   } });
	return ret;
}

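/*
 * Queue an munmap, merging with the previous entry when the ranges are
 * adjacent.  Unmapping the stub region is refused with -EINVAL, since
 * the stub pages must stay mapped in the host process.
 */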
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if ((addr >= STUB_START) && (addr < STUB_END))
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}

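/*
 * Queue an mprotect, merging with the previous entry when the range is
 * adjacent and the protection matches.
 */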
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}

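/* Advance n to the next inc-aligned boundary; inc must be a power of two. */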
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

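/*
 * Walk the PTEs of one pmd and queue whatever host operations are
 * needed to bring them in sync: pages marked new are mapped (or
 * unmapped when not present), protection changes become mprotects, and
 * every PTE is marked up to date.  Host access is derived from the PTE
 * bits: a non-young page is mapped with no access and a clean page
 * without write, so the first touch faults and lets UML track the
 * accessed/dirty state.  The stub region is always skipped.
 */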
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}

static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}

static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}

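/*
 * Sync the host mappings of [start_addr, end_addr) with the page
 * tables, walking pgd -> pud -> pmd -> pte and batching host calls
 * through a host_vm_change.  With "force" set, mappings are re-issued
 * even when they are not marked as changed.  If a host operation fails,
 * the address space is in an undefined state, so the process is killed.
 */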
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		/* We are under mmap_sem, release it such that current can terminate */
		up_write(&current->mm->mmap_sem);
		force_sig(SIGKILL, current);
		do_signal(&current->thread.regs);
	}
}

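/*
 * Kernel-space counterpart of fix_range_common(): walk the init_mm page
 * tables for [start, end) and apply changes to the host directly with
 * os_unmap_memory(), map_memory() and os_protect_memory() instead of
 * queueing them.  Returns nonzero if any mapping was changed.
 */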
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

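/*
 * Sync the host mapping of a single page, issuing the host call
 * immediately instead of going through a host_vm_change queue.  As in
 * update_pte_range(), host access is degraded for non-young and clean
 * pages so that accessed/dirty state can be tracked through faults.
 */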
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err) {
		if (err == -ENOMEM)
			report_enomem();

		goto kill;
	}

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

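/*
 * Page-table walkers exported for use elsewhere in the UML kernel.
 */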
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

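/*
 * The flush_tlb_* entry points.  On UML, "flushing the TLB" means
 * replaying the page table state into the host address space.
 */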
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

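/*
 * Userspace ranges are fixed through fix_range_common(); ranges with no
 * mm (kernel mappings) fall back to the kernel-range flush.
 */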
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}

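/*
 * Resync every VMA of current unconditionally (force = 1), re-issuing
 * all mappings even when they are not marked as changed.
 */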
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}