/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
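
/*
 * Sketch of the locking pattern this enforces (taken from
 * __flush_tlb_range() below): every software purge is bracketed by
 * purge_tlb_start()/purge_tlb_end(), which take pa_tlb_lock with
 * interrupts disabled, so that only one PxTLB broadcast can be in
 * flight at a time:
 *
 *	purge_tlb_start(flags);
 *	mtsp(sid, 1);
 *	pdtlb(start);
 *	purge_tlb_end(flags);
 */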

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}
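
/*
 * Note: update_mmu_cache() is the second half of a deferred-flush
 * handshake with flush_dcache_page() below.  When a page cache page
 * is dirtied while it has no user mappings, flush_dcache_page() only
 * sets PG_dcache_dirty; the actual flush is performed here, once the
 * page is mapped into a process.
 */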

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}
231 /* "New and Improved" version from Jim Hull
232 * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
233 * The following CAFL_STRIDE is an optimized version, see
234 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
235 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
236 */
237 #define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
238 dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
239 icache_stride = CAFL_STRIDE(cache_info.ic_conf);
240 #undef CAFL_STRIDE
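
	/*
	 * Worked example with made-up field values (not taken from real
	 * hardware): cc_line = 2, cc_block = 1, cc_shift = 0 gives
	 * CAFL_STRIDE = 2 << (3 + 1 + 0) = 32 bytes, matching Jim Hull's
	 * original form, (1 << 0) * (2 << 4) = 32.
	 */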

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
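
/*
 * Example of the SHM_COLOUR test above, assuming the 4MB colour used
 * on parisc (the addresses are illustrative): user mappings at
 * 0x40001000 and 0x40401000 share the same offset (0x1000) within the
 * colour, so they are equivalent aliases and the second flush is
 * skipped.  A mapping at 0x40002000 would be an inequivalent alias:
 * it would be flushed separately and trigger the KERN_ERR report.
 */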

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size, start;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

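	/*
	 * Break-even sketch for the threshold below: a range flush costs
	 * roughly rangetime/size cycles per byte, so flushing N bytes by
	 * range costs N * rangetime / size cycles.  That matches the
	 * alltime cost of a full flush when N = size * alltime / rangetime.
	 */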
	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measurement of kernel text,
	 * which has been mapped with huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	size = 0;
	start = (unsigned long) _text;
	rangetime = mfctl(16);
	while (start < (unsigned long) _end) {
		flush_tlb_kernel_range(start, start + PAGE_SIZE);
		start += PAGE_SIZE;
		size += PAGE_SIZE;
	}
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);

set_tlb_threshold:
	if (threshold)
		parisc_tlb_flush_threshold = threshold;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
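
/*
 * Note the ordering above: the dcache flush comes first, then the
 * kernel TLB entry is purged.  One plausible reading, consistent with
 * the TLB-coherence comment in flush_dcache_page(): while a valid
 * translation exists the CPU may speculatively pull the page back
 * into the cache, so the purge is done last.
 */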

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	if (likely(!split_tlb)) {
		while (start < end) {
			purge_tlb_start(flags);
			mtsp(sid, 1);
			pdtlb(start);
			purge_tlb_end(flags);
			start += PAGE_SIZE;
		}
		return 0;
	}

	/* split TLB case */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}
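
#if 0
	/* Usage sketch (not compiled): flush one page of a user address
	   space.  On parisc the space id comes from the mm context (this
	   file compares mm->context against mfsp(3) elsewhere); a nonzero
	   return means the whole TLB was flushed rather than the range.
	   The flush_tlb_kernel_range() wrapper in asm/tlbflush.h passes
	   space id 0 to select the kernel space. */
	if (__flush_tlb_range(mm->context, addr, addr + PAGE_SIZE))
		/* everything was flushed; per-page purges are unnecessary */;
#endif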

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context))
				flush_tlb_page(vma, addr);
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context))
				flush_tlb_page(vma, addr);
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context))
			flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);