/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

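/*
 * Sentinel context id: an mm whose context.id is still MMU_NO_CONTEXT
 * has no hardware context (PID) allocated, and the flush routines
 * treat it as having nothing to invalidate.
 */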
#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled() (and the implementations use
	 * early_cpu_has_feature() etc.) because that works early in boot,
	 * and this is the machine check path, which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
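/*
 * tlbiel_all_lpid() below is reached from guest machine check handling
 * (KVM); the guest's MMU mode is passed in explicitly because it can
 * differ from the host's, which is what radix_enabled() reports.
 */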
static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}
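/*
 * The wrappers below dispatch at runtime to either the radix or the
 * hash implementation (declared in the tlbflush-radix.h and
 * tlbflush-hash.h headers included above), depending on which MMU mode
 * the kernel booted with.
 */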
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

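/*
 * The local_* variants invalidate only the current CPU's TLB (tlbiel);
 * the corresponding flush_* routines further down also take care of
 * other CPUs, using broadcast tlbie or IPIs depending on configuration.
 */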
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_all_mm(mm);
	return hash__local_flush_all_mm(mm);
}

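/*
 * mmu_gather back-end: invoked once a batch of unmaps is complete (e.g.
 * via tlb_finish_mmu()) to flush whatever the gather accumulated.
 */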
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_all_mm(mm);
	return hash__flush_all_mm(mm);
}
#else
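/* With no other CPUs, the "global" flushes reduce to the local variants. */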
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}

static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash:
	 * ptep_modify_prot_start() does a pte_update(), which performs or
	 * schedules any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings, non-PTEs, or not-present PTEs.
	 */
	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice those should rarely if ever matter.
	 */
	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above was present in old but cleared in new, flush.
	 * With the exception of _PAGE_ACCESSED, don't worry about flushing
	 * if that was cleared (see the comment in ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}
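
/*
 * Worked example of the logic above: write-protecting a page clears
 * _PAGE_WRITE, so that bit appears in delta and was set in oldval,
 * and the final test forces a flush. A write-enable upgrade instead
 * sets a bit that was clear in oldval, so no flush is needed; the MMU
 * simply re-fetches the PTE on the resulting permission fault (see
 * flush_tlb_fix_spurious_fault() above).
 */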

static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

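/*
 * tlbie_capable: the platform supports the broadcast tlbie instruction.
 * tlbie_enabled: broadcast tlbie is actually in use; it can be disabled
 * (e.g. via debugfs), in which case flushes fall back to tlbiel plus
 * IPIs.
 */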
extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */