/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

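/*
 * Implemented in arch/arm/mm/ioremap.c: when init_mm's vmalloc/ioremap
 * mappings have changed since this mm last ran, copy the kernel page
 * table entries covering the vmalloc region into this mm and record the
 * vmalloc_seq they correspond to.
 */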
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
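/*
 * A new mm starts with context.id 0, i.e. no ASID assigned yet;
 * check_and_switch_context() (arch/arm/mm/context.c) allocates a real
 * ASID the first time we switch to this mm.
 */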
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

#ifdef CONFIG_ARM_ERRATA_798181
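/*
 * Cortex-A15 erratum 798181: broadcast TLB maintenance may not be observed
 * by other cores, so the workaround falls back to IPI-based flushing. This
 * helper gathers the set of CPUs that still need to be targeted for this mm.
 */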
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else /* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
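/*
 * Defining finish_arch_post_lock_switch to itself tells the scheduler core
 * that this architecture implements the hook. It is hidden from modules
 * (see the #ifndef MODULE above) because the deferred switch relies on
 * preempt_enable_no_resched(), which modules are not allowed to use.
 */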
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */

#endif /* CONFIG_MMU */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

#endif /* CONFIG_CPU_HAS_ASID */

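/*
 * There are no per-mm resources to release (ASIDs are recycled by
 * generation rather than freed), so destroy_context() is a no-op.
 * activate_mm() is simply switch_mm() with no task to hand over.
 */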
#define destroy_context(mm)		do { } while(0)
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned. No registers are touched. We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

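	/*
	 * Mark this CPU in the new mm's cpumask and perform the switch
	 * unless we are already running this mm here. On VIVT caches,
	 * cpu_switch_mm() flushes the cache, so nothing of the old mm
	 * remains on this CPU and it can be dropped from prev's cpumask.
	 */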
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#endif