/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

#ifdef CONFIG_PPC_NATIVE
tlbiel_all(void)18 static inline void tlbiel_all(void)
19 {
20 /*
21 * This is used for host machine check and bootup.
22 *
23 * This uses early_radix_enabled and implementations use
24 * early_cpu_has_feature etc because that works early in boot
25 * and this is the machine check path which is not performance
26 * critical.
27 */
28 if (early_radix_enabled())
29 radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
30 else
31 hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
32 }
#else
tlbiel_all(void)34 static inline void tlbiel_all(void) { BUG(); };
#endif

/*
 * Invalidate the local CPU's TLB entries for the current LPID only.
 * @radix: true if the (guest) MMU is in radix mode, false for hash.
 */
static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}


#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
/* Flush TLB entries for a PMD-mapped range of @vma, dispatched by MMU mode. */
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	/* hash side uses the generic range flush here */
	return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/* Flush TLB entries for a hugetlb range of @vma, dispatched by MMU mode. */
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	/* hash side uses the generic range flush here */
	return hash__flush_tlb_range(vma, start, end);
}

/* Flush user TLB entries for [start, end) in @vma, dispatched by MMU mode. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

/* Flush kernel-mapping TLB entries for [start, end), dispatched by MMU mode. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

/* Flush this CPU's TLB entries for @mm only (no broadcast to other CPUs). */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

/* Flush this CPU's TLB entry for @vmaddr in @vma (no broadcast). */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

/* Flush all of this CPU's translations for @mm (no broadcast). */
static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_all_mm(mm);
	return hash__local_flush_all_mm(mm);
}

/* Perform the TLB flush for an mmu_gather batch, dispatched by MMU mode. */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
/* Flush TLB entries for @mm on all CPUs, dispatched by MMU mode. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

/* Flush the TLB entry for @vmaddr in @vma on all CPUs. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}

/* Flush all translations for @mm on all CPUs. */
static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_all_mm(mm);
	return hash__flush_all_mm(mm);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

141 #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
flush_tlb_fix_spurious_fault(struct vm_area_struct * vma,unsigned long address)142 static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
143 unsigned long address)
144 {
145 /* See ptep_set_access_flags comment */
146 if (atomic_read(&vma->vm_mm->context.copros) > 0)
147 flush_tlb_page(vma, address);
148 }
149
extern bool tlbie_capable;
extern bool tlbie_enabled;

cputlb_use_tlbie(void)153 static inline bool cputlb_use_tlbie(void)
154 {
155 return tlbie_enabled;
156 }
157
#endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */