// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

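/*
 * arm_heavy_mb() backs the "heavy" memory barrier used when a DSB alone is
 * not enough: sync the outer (L2) cache if one is present, then call the
 * SoC-specific barrier hook if one has been registered in soc_mb.
 */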
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
        if (outer_cache.sync)
                outer_cache.sync();
#endif
        if (soc_mb)
                soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

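/*
 * Map the page at @pfn into the flush alias area at the kernel virtual
 * address sharing a cache colour with @vaddr, then clean and invalidate
 * the D-cache over that one page (MCRR range operation) and drain the
 * write buffer.
 */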
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
        unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;

        set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
            : "cc");
}

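/*
 * Flush the I-cache for part of a user page through a colour-matched
 * kernel alias: map the page as above and run flush_icache_range() over
 * the aliased address plus the offset of @vaddr within the page.
 */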
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
        unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;

        set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
        to = va + offset;
        flush_icache_range(to, to + len);
}

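/*
 * On VIVT the per-mm flush is handled by the CPU-specific code.  An
 * aliasing VIPT D-cache offers no way to pick out a single mm, so clean
 * and invalidate the whole D-cache and drain the write buffer instead.
 */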
void flush_cache_mm(struct mm_struct *mm)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_mm(mm);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }
}

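/*
 * Like flush_cache_mm(), an aliasing VIPT D-cache can only be flushed as a
 * whole.  Executable ranges additionally get the I-cache invalidated.
 */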
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_range(vma, start, end);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }

        if (vma->vm_flags & VM_EXEC)
                __flush_icache_all();
}

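/*
 * Flush a run of user pages.  On an aliasing VIPT cache the flush goes
 * through a colour-matched kernel alias and the whole I-cache is
 * invalidated.  An ASID-tagged VIVT I-cache cannot be flushed by address
 * for another context, so it too is invalidated wholesale for executable
 * VMAs.
 */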
void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_pages(vma, user_addr, pfn, nr);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
                __flush_icache_all();
        }

        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)              do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)       do { } while (0)
#endif

#define FLAG_PA_IS_EXEC                 1
#define FLAG_PA_CORE_IN_MM              2

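/*
 * Helpers used when ptrace (or uprobes) writes into another process's
 * page.  FLAG_PA_CORE_IN_MM means the calling CPU has the target mm live;
 * FLAG_PA_IS_EXEC means the VMA is executable.  Where cache maintenance is
 * not broadcast in hardware, the I-cache invalidate is pushed to the other
 * CPUs with smp_call_function().
 */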
static void flush_ptrace_access_other(void *args)
{
        __flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
                           unsigned long len, unsigned int flags)
{
        if (cache_is_vivt()) {
                if (flags & FLAG_PA_CORE_IN_MM) {
                        unsigned long addr = (unsigned long)kaddr;
                        __cpuc_coherent_kern_range(addr, addr + len);
                }
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
                __flush_icache_all();
                return;
        }

        /* VIPT non-aliasing D-cache */
        if (flags & FLAG_PA_IS_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                if (icache_is_vipt_aliasing())
                        flush_icache_alias(page_to_pfn(page), uaddr, len);
                else
                        __cpuc_coherent_kern_range(addr, addr + len);
                if (cache_ops_need_broadcast())
                        smp_call_function(flush_ptrace_access_other,
                                          NULL, 1);
        }
}

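/*
 * Translate the VMA into __flush_ptrace_access() flags: whether this CPU
 * currently has the target mm active and whether the mapping is
 * executable.
 */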
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr, unsigned long len)
{
        unsigned int flags = 0;
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                flags |= FLAG_PA_CORE_IN_MM;
        if (vma->vm_flags & VM_EXEC)
                flags |= FLAG_PA_IS_EXEC;
        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

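/*
 * Used by uprobes when it writes an out-of-line (XOL) instruction slot:
 * the slot is always executable and is written from a CPU running with
 * the target mm, so both flags are set unconditionally.
 */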
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                             void *kaddr, unsigned long len)
{
        unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
#ifdef CONFIG_SMP
        preempt_disable();
#endif
        memcpy(dst, src, len);
        flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
        preempt_enable();
#endif
}

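/*
 * Write back any data held in the kernel mapping of @folio so that the
 * physical page is coherent with the kernel's view.  Highmem folios are
 * flushed page by page through temporary mappings; on an aliasing cache
 * only pages that already have a permanent kmap (kmap_high_get()) are
 * flushed, since a page with no kernel mapping can have no dirty kernel
 * cache lines.
 */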
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        if (!folio_test_highmem(folio)) {
                __cpuc_flush_dcache_area(folio_address(folio),
                                         folio_size(folio));
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
                        for (i = 0; i < folio_nr_pages(folio); i++) {
                                void *addr = kmap_local_folio(folio,
                                                              i * PAGE_SIZE);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_local(addr);
                        }
                } else {
                        for (i = 0; i < folio_nr_pages(folio); i++) {
                                void *addr = kmap_high_get(folio_page(folio, i));
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                        kunmap_high(folio_page(folio, i));
                                }
                        }
                }
        }

        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * we only need to do one flush - which would be at the relevant
         * userspace colour, which is congruent with page->index.
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

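/*
 * Walk every shared userspace mapping of @folio in the current mm and
 * flush the pages it covers, clamping the range to the part of the folio
 * that each VMA actually maps.
 */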
static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *vma;
        pgoff_t pgoff, pgoff_end;

        /*
         * There are possible user space mappings of this page:
         * - VIVT cache: we need to also write back and invalidate all user
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
        pgoff = folio->index;
        pgoff_end = pgoff + folio_nr_pages(folio) - 1;

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
                unsigned long start, offset, pfn;
                unsigned int nr;

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (vma->vm_mm != mm)
                        continue;
                if (!(vma->vm_flags & VM_MAYSHARE))
                        continue;

                start = vma->vm_start;
                pfn = folio_pfn(folio);
                nr = folio_nr_pages(folio);
                offset = pgoff - vma->vm_pgoff;
                if (offset > -nr) {
                        pfn -= offset;
                        nr += offset;
                } else {
                        start += offset * PAGE_SIZE;
                }
                if (start + nr * PAGE_SIZE > vma->vm_end)
                        nr = (vma->vm_end - start) / PAGE_SIZE;

                flush_cache_pages(vma, start, pfn, nr);
        }
        flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
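/*
 * Called when a new user PTE is installed.  For executable mappings, make
 * sure any dirty D-cache lines for the page have been written back (so
 * the I-cache cannot fetch stale instructions) and invalidate the
 * I-cache.  PG_dcache_clean lets the D-cache flush be skipped when it has
 * already been done.
 */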
void __sync_icache_dcache(pte_t pteval)
{
        unsigned long pfn;
        struct folio *folio;
        struct address_space *mapping;

        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                /* only flush non-aliasing VIPT caches for exec mappings */
                return;
        pfn = pte_pfn(pteval);
        if (!pfn_valid(pfn))
                return;

        folio = page_folio(pfn_to_page(pfn));
        if (cache_is_vipt_aliasing())
                mapping = folio_flush_mapping(folio);
        else
                mapping = NULL;

        if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
                __flush_dcache_folio(mapping, folio);

        if (pte_exec(pteval))
                __flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcasted.
 */
void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (is_zero_pfn(folio_pfn(folio)))
                return;

        if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
                if (test_bit(PG_dcache_clean, &folio->flags))
                        clear_bit(PG_dcache_clean, &folio->flags);
                return;
        }

        mapping = folio_flush_mapping(folio);

        if (!cache_ops_need_broadcast() &&
            mapping && !folio_mapped(folio))
                clear_bit(PG_dcache_clean, &folio->flags);
        else {
                __flush_dcache_folio(mapping, folio);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, folio);
                else if (mapping)
                        __flush_icache_all();
                set_bit(PG_dcache_clean, &folio->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_folio);

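/* Page-based wrapper around flush_dcache_folio(). */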
void flush_dcache_page(struct page *page)
{
        flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        unsigned long pfn;

        /* VIPT non-aliasing caches need do nothing */
        if (cache_is_vipt_nonaliasing())
                return;

        /*
         * Write back and invalidate userspace mapping.
         */
        pfn = page_to_pfn(page);
        if (cache_is_vivt()) {
                flush_cache_page(vma, vmaddr, pfn);
        } else {
                /*
                 * For aliasing VIPT, we can flush an alias of the
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
                __flush_icache_all();
        }

        /*
         * Invalidate kernel mapping.  No data should be contained
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}