/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
#include <asm/pte-walk.h>

#include <trace/events/thp.h>

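/*
 * Per-CPU batch of hash PTE invalidations, filled by hpte_need_flush()
 * and drained by __flush_tlb_pending().
 */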
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i, offset;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA must not have been reused)
	 * at the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
		if (unlikely(psize == MMU_PAGE_16G))
			offset = PTRS_PER_PUD;
		else
			offset = PTRS_PER_PMD;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size.  If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
		offset = PTRS_PER_PTE;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep, offset);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return.
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range() when it
	 * tries to allocate a new pte). If we have to reclaim memory and
	 * end up scanning and resetting referenced bits then our batch
	 * context will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
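	/* Queue the entry; if the batch is now full, flush it immediately. */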
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i, local;

	i = batch->index;
	local = mm_is_thread_local(batch->mm);
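	/* A single entry can be flushed directly; anything larger goes
	 * through the batched flush_hash_range() path. */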
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

void hash__tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because of a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping,
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
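	/* Walk the range one base page at a time and queue any hashed
	 * entries for flushing; THP entries take the hugepage flush path. */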
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
						  &hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (is_thp)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		if (unlikely(is_thp))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
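	/* Queue every hashed PTE under this PMD for flushing. */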
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & H_PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}