/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * MM context support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/mm_types.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mem-layout.h>

/*
 * The VM port hides all TLB management, so "lazy TLB" isn't very
 * meaningful.  Even for ports to architectures with visible TLBs,
 * this is almost invariably a null function.
 *
 * mm->context is set up by pgd_alloc, so no init_new_context is required.
 */
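
/*
 * mm->context carries ptbase, the page table base handed to __vmnewmap()
 * below, and a generation counter used to lazily resync the kernel part
 * of the map (struct mm_context, <asm/mmu.h>).  The default no-op
 * enter_lazy_tlb(), init_new_context(), destroy_context() and
 * deactivate_mm() come from <asm-generic/mmu_context.h>, included at
 * the bottom of this file.
 */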

/*
 * Switch active mm context
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int l1;

	/*
	 * For the virtual machine, we have to update the system (kernel)
	 * portion of the map if it has been touched; the generation
	 * counters tell us whether next's copy is stale.
	 */
38 if (next->context.generation < prev->context.generation) {
39 for (l1 = MIN_KERNEL_SEG; l1 <= max_kernel_seg; l1++)
40 next->pgd[l1] = init_mm.pgd[l1];
41
42 next->context.generation = prev->context.generation;
43 }
44
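	/* Hand next's page table base to the Hexagon VM. */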
	__vmnewmap((void *)next->context.ptbase);
}

/*
 * Activate new memory map for task
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

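	/*
	 * switch_mm() may touch next->pgd and then switches the VM's map;
	 * keep interrupts off so that sequence isn't interrupted on this CPU.
	 */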
	local_irq_save(flags);
	switch_mm(prev, next, current_thread_info()->task);
	local_irq_restore(flags);
}

/* Generic hooks for arch_dup_mmap and arch_exit_mmap */
#include <asm-generic/mm_hooks.h>

#include <asm-generic/mmu_context.h>

#endif