// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 * -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include <mm/mmu_decl.h>

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
        unsigned long ptephys;

        if (Hash) {
                /* flush_hash_pages() wants the physical address of the PTE */
                ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(mm->context.id, addr, ptephys, 1);
        }
}
EXPORT_SYMBOL(flush_hash_entry);
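
/*
 * Typical caller, a sketch only (the exact form lives in the book3s/32
 * pgtable headers and varies between kernel versions): the PTE-update
 * helpers check _PAGE_HASHPTE when a PTE is cleared and flush the
 * matching hash entry before the slot can be reused, roughly:
 *
 *	if (pte_val(old) & _PAGE_HASHPTE)
 *		flush_hash_entry(mm, ptep, addr);
 */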

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done. On hash-table CPUs there is nothing
 * left to do here: the hash entries were already flushed, one page at
 * a time, via flush_hash_entry() as the PTEs were cleared.
 */
void tlb_flush(struct mmu_gather *tlb)
{
        if (!Hash) {
                /*
                 * 603 needs to flush the whole TLB here since
                 * it doesn't use a hash table.
                 */
                _tlbia();
        }
}
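
/*
 * Context, a sketch only (the mmu_gather signatures vary between kernel
 * versions): tlb_flush() is reached from the generic gather teardown, e.g.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... unmap pages, accumulating them in the gather ...
 *	tlb_finish_mmu(&tlb, start, end);	// ends up calling tlb_flush()
 */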

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the TLB entries for the specified mm context
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux tables are concerned, we flush it too.
 * -- Cort
 */

static void flush_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end)
{
        pmd_t *pmd;
        unsigned long pmd_end;
        int count;
        unsigned int ctx = mm->context.id;

        if (!Hash) {
                _tlbia();
                return;
        }
        start &= PAGE_MASK;
        if (start >= end)
                return;
        /* Round end up to the last byte of its page */
        end = (end - 1) | ~PAGE_MASK;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
        for (;;) {
                /* Last address covered by this page-table page */
                pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
                if (pmd_end > end)
                        pmd_end = end;
                if (!pmd_none(*pmd)) {
                        count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
                        flush_hash_pages(ctx, start, pmd_val(*pmd), count);
                }
                if (pmd_end == end)
                        break;
                start = pmd_end + 1;
                ++pmd;
        }
}
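
/*
 * Worked example, assuming the usual ppc32 geometry of 4 KiB pages and
 * 4 MiB covered per page-table page (PAGE_SHIFT = 12, PGDIR_SHIFT = 22):
 * flush_range(mm, 0x3ff000, 0x402000) straddles two page-table pages, so
 * the loop above calls flush_hash_pages() twice, first with count = 1 for
 * the page at 0x3ff000, then with count = 2 for the pages at 0x400000
 * and 0x401000.
 */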

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_range(&init_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
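
/*
 * Note: init_mm owns the kernel page tables, so the same page-table walk
 * used for user ranges applies here; callers use this after changing
 * kernel mappings, for instance when vmalloc areas are torn down.
 */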

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        struct vm_area_struct *mp;

        if (!Hash) {
                _tlbia();
                return;
        }

        /*
         * It is safe to go down the mm's list of vmas when called
         * from dup_mmap, holding mmap_sem. It would also be safe from
         * unmap_region or exit_mmap, but not from vmtruncate on SMP -
         * but it seems dup_mmap is the only SMP case which gets here.
         */
        for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        struct mm_struct *mm;
        pmd_t *pmd;

        if (!Hash) {
                _tlbie(vmaddr);
                return;
        }
        /* Kernel addresses are translated via init_mm's page tables */
        mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
        if (!pmd_none(*pmd))
                flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(flush_tlb_page);
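
/*
 * Typical caller, a sketch only: generic mm code flushes a single page
 * after clearing its PTE, e.g. ptep_clear_flush() does roughly
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 */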

/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        flush_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_range);

void __init early_init_mmu(void)
{
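        /*
         * Nothing to do here for these 32-bit MMUs; hash-table setup
         * happens later in early boot (via MMU_init() on 32-bit).
         */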
}