/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags
 * that can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate whether the
 * caches are coherent. The kernel clears this bit whenever a page is added
 * to the page cache. At that time, the caches might not be in sync. We,
 * therefore, define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, in this configuration, we use the
 * flag to indicate a dirty page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
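
/*
 * The D-cache is virtually indexed: when a cache way is larger than a
 * page, the same physical page can be cached at several virtual
 * "colors".  The helpers below therefore operate on the alias that
 * matches the user-space color, using a TLBTEMP window for pages whose
 * kernel mapping has the wrong color (or no mapping at all, for
 * highmem).
 */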
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
		}
	}
}

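/*
 * Pick a kernel virtual address for the page that has the same D-cache
 * color as the user address 'vaddr'.  If the page's regular kernel
 * mapping already has the right color, use it and report *paddr == 0;
 * otherwise hand back an address in the caller's TLBTEMP window
 * together with the physical address that must be mapped there.
 */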
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}

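/*
 * Clear a page through a mapping whose color matches the user mapping,
 * so the user sees the cleared data without further flushing.
 * Preemption stays disabled while the temporary TLBTEMP mapping is in
 * use, apparently to keep a context switch from clobbering it.
 */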
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

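/*
 * Copy a page using two TLBTEMP windows colored like 'vaddr': the
 * destination goes through TLBTEMP_BASE_1 and the source through
 * TLBTEMP_BASE_2, so both accesses are coherent with the user's view
 * of the pages.
 */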
void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to, or is about to
 * read from, a page cache page.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	/*
	 * If we have a mapping but the page is not mapped to user space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit the alias flush if aliasing is
		 * not an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

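		/*
		 * Flush the page first at its kernel color, then at the
		 * user color derived from page->index, and finally
		 * invalidate any stale I$ lines at the user color.
		 */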
		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

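/*
 * Called after a PTE has been installed for 'addr'.  Cache maintenance
 * deferred by flush_dcache_page() happens here, now that the user-space
 * virtual address (and hence its cache color) is known.
 */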
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

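	/*
	 * PG_arch_1 set means flush_dcache_page() deferred a flush for
	 * this page; do it now at both the kernel and the user color.
	 */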
	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
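	/*
	 * No D-cache aliasing: only I$/D$ coherency matters.  Here a
	 * set PG_arch_1 means the caches are already in sync, so sync
	 * them once for executable mappings and mark the page clean.
	 */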
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

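		/*
		 * Write the new data back through the kernel mapping,
		 * then invalidate stale I$ lines at the user color for
		 * executable mappings.
		 */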
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

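/*
 * The kernel only reads from the page here (e.g. on the ptrace path
 * via access_process_vm()), so it is enough to make the D-cache
 * coherent with the user's view before the copy; no post-copy
 * maintenance is needed.
 */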
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient.)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}

#endif