/*
 * arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>
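
/*
 * Bring this mm's view of the kernel (vmalloc) page tables back in sync
 * with init_mm when new top-level entries have appeared since this mm
 * last ran; see the vmalloc_seq check in check_and_switch_context() below.
 */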
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
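
/*
 * A context.id of 0 is never handed out as a live ASID, so initialising it
 * to 0 marks the mm as owning no ASID yet; check_and_switch_context() will
 * allocate one the first time the mm is switched in.
 */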
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        atomic64_set(&mm->context.id, 0);
        return 0;
}
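
/*
 * Workaround for Cortex-A15 erratum 798181: TLB maintenance operations may
 * not be correctly broadcast to other cores, so the workaround needs to know
 * which CPUs this mm may have run on in order to flush them explicitly.
 */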
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                             cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                                           cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */
#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
{
        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                __check_vmalloc_seq(mm);

        if (irqs_disabled())
                /*
                 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
                 * high interrupt latencies, defer the call and continue
                 * running with the old mm. Since we only support UP systems
                 * on non-ASID CPUs, the old mm will remain valid until the
                 * finish_arch_post_lock_switch() call.
                 */
                mm->context.switch_pending = 1;
        else
                cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
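/*
 * Defining finish_arch_post_lock_switch signals to the generic scheduler
 * code that this architecture implements the hook, which runs once the
 * runqueue lock has been dropped after a context switch; any mm switch
 * deferred above is completed here with interrupts enabled.
 */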
#define finish_arch_post_lock_switch \
        finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        struct mm_struct *mm = current->mm;

        if (mm && mm->context.switch_pending) {
                /*
                 * Preemption must be disabled during cpu_switch_mm() as we
                 * have some stateful cache flush implementations. Check
                 * switch_pending again in case we were preempted and the
                 * switch to this mm was already done.
                 */
                preempt_disable();
                if (mm->context.switch_pending) {
                        mm->context.switch_pending = 0;
                        cpu_switch_mm(mm->pgd, mm);
                }
                preempt_enable_no_resched();
        }
}
#endif /* !MODULE */

#endif	/* CONFIG_MMU */
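
/* Without ASIDs there is no per-mm context state to set up. */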
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        return 0;
}

#endif	/* CONFIG_CPU_HAS_ASID */
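
/*
 * Destroying a context requires no work on ARM; activating an mm is an
 * ordinary switch_mm() with no task argument.
 */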
#define destroy_context(mm)		do { } while(0)
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
#ifdef CONFIG_MMU
        unsigned int cpu = smp_processor_id();

        /*
         * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
         * so check for possible thread migration and invalidate the I-cache
         * if we're new to this CPU.
         */
        if (cache_ops_need_broadcast() &&
            !cpumask_empty(mm_cpumask(next)) &&
            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                __flush_icache_all();
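        /*
         * Record that next has run on this CPU, and only do the actual
         * switch if this CPU had not seen next before or the mm is really
         * changing. On VIVT caches this CPU holds nothing of prev once we
         * have switched away, so drop it from prev's cpumask.
         */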
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                check_and_switch_context(next, tsk);
                if (cache_is_vivt())
                        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        }
#endif
}
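
/* ARM needs no action when a task's mm is deactivated. */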
#define deactivate_mm(tsk,mm)	do { } while (0)

#endif /* __ASM_ARM_MMU_CONTEXT_H */