/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 * - flush_cache_all() flushes the entire cache
 * - flush_cache_mm(mm) flushes the specified mm context's cache lines
 * - flush_cache_dup_mm(mm) handles cache flushing when forking
 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 * - flush_cache_range(vma, start, end) flushes a range of pages
 * - flush_icache_range(start, end) flushes a range of instructions
 * - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 * - flush_cache_sigtramp() flushes the signal trampoline
 * - flush_icache_all() flushes the entire instruction cache
 * - flush_data_cache_page() flushes a page from the data cache
 * - __flush_icache_user_range(start, end) flushes a range of user instructions
 */
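/*
 * Hedged usage sketch (hypothetical buffer "dst", instructions "insns" and
 * length "len", not taken from this file): after the kernel writes
 * instructions into memory, e.g. when installing a trampoline, the range
 * must be made coherent with the instruction cache before it is executed:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */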

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)
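/*
 * Simplified sketch of the intended deferred-flush protocol (hypothetical
 * caller, not the exact MIPS fault-path code): flush_dcache_page() below
 * may only mark the page dirty; whoever later maps the page for user space
 * is expected to clean it first:
 *
 *	if (Page_dcache_dirty(page)) {
 *		__flush_dcache_page(page);
 *		ClearPageDcacheDirty(page);
 *	}
 */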

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_page(page);
	else if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
	unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
	unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
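/*
 * Hedged note with a hypothetical call (arguments illustrative only):
 * copy_to_user_page()/copy_from_user_page() are used by generic code such
 * as access_process_vm() when the kernel reads or writes another process's
 * pages, and on MIPS they must also keep the caches coherent with the user
 * mapping of the page:
 *
 *	copy_to_user_page(vma, page, vaddr, dst, src, len);
 */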

extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
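/*
 * Hedged sketch (hypothetical probe function, not defined here): the
 * function passed to run_uncached() is executed through an uncached alias
 * of kernel memory, so it can size or probe the caches without its own
 * accesses being cached:
 *
 *	unsigned long scache_size = run_uncached(probe_scache_fn);
 */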

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}
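/*
 * Hedged usage sketch (hypothetical copy, simplified): kmap_coherent()
 * maps a user page at a kernel virtual address whose cache colour matches
 * the user mapping, so the copy below cannot create d-cache aliases; calls
 * are paired with kunmap_coherent():
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 */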

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
	flush_dcache_page(page);
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}
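/*
 * Hedged usage sketch (hypothetical I/O path): code doing I/O through a
 * vmap()ed alias of a buffer is expected to write the alias back before
 * the device reads it and to invalidate it before the CPU reads data the
 * device has written:
 *
 *	flush_kernel_vmap_range(vaddr, size);		before the transfer
 *	... device reads/writes the buffer ...
 *	invalidate_kernel_vmap_range(vaddr, size);	before reading results
 */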

#endif /* _ASM_CACHEFLUSH_H */