/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);
/**
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
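	/*
	 * On Octeon a single SYNCI operates on the entire local
	 * icache, which is why the start/end range is intentionally
	 * ignored here.
	 */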
	octeon_local_flush_icache();
}

/**
 * Flush caches as necessary for all cores affected by a
 * vma. If no vma is supplied, all cores are flushed.
 *
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

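	/*
	 * Make sure any newly written instructions are globally
	 * visible before the icaches are invalidated, both locally
	 * and on the cores targeted by the IPIs below.
	 */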
	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
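	/* This core was already flushed above; drop it from the IPI targets. */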
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}


/**
 * Called to flush the icache on all cores
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * Called to flush all memory associated with a memory
 * context.
 *
 * @mm:     Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}


/**
 * Flush a range of kernel addresses out of the icache
 *
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}


/**
 * Flush a range out of a vma
 *
 * @vma:    VMA to flush
 * @start:  beginning of the range to flush
 * @end:    end of the range to flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}


/**
 * Flush a specific page of a vma
 *
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

/**
 * Probe Octeon's caches
 *
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
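		/*
		 * Decode the icache geometry from the standard MIPS
		 * Config1 fields: IL (bits 21:19) gives a line size of
		 * 2 << IL bytes, IS (bits 24:22) gives 64 << IS sets,
		 * and IA (bits 18:16) gives IA + 1 ways.
		 */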
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	if (smp_processor_id() == 0) {
		pr_info("Primary instruction cache %ldkB, %s, %d way, "
			"%d sets, linesize %d bytes.\n",
			icache_size >> 10,
			cpu_has_vtag_icache ?
			"virtually tagged" : "physically tagged",
			c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_info("Primary data cache %ldkB, %d-way, %d sets, "
			"linesize %d bytes.\n",
			dcache_size >> 10, c->dcache.ways,
			c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
	extern char except_vec2_octeon;
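	/*
	 * Install the 128-byte handler at offset 0x100 from the
	 * exception vector base, the architected MIPS cache error
	 * vector.
	 */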
	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/**
 * Setup the Octeon cache flush routines
 *
 */
void octeon_cache_init(void)
{
	probe_octeon();

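	/*
	 * The dcache behaves as if physically tagged (see the comment
	 * above octeon_flush_data_cache_page()), so shared mappings
	 * need no aliasing constraint beyond page alignment.
	 */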
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all = octeon_flush_icache_all;
	__flush_cache_all = octeon_flush_icache_all;
	flush_cache_mm = octeon_flush_cache_mm;
	flush_cache_page = octeon_flush_cache_page;
	flush_cache_range = octeon_flush_cache_range;
	flush_icache_all = octeon_flush_icache_all;
	flush_data_cache_page = octeon_flush_data_cache_page;
	flush_icache_range = octeon_flush_icache_range;
	local_flush_icache_range = local_octeon_flush_icache_range;
	__flush_icache_user_range = octeon_flush_icache_range;
	__local_flush_icache_user_range = local_octeon_flush_icache_range;

	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
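
/*
 * A minimal sketch of a client of the notifier API above; the names
 * here are hypothetical, not part of this file:
 *
 *	static int my_cache_error_event(struct notifier_block *nb,
 *					unsigned long val, void *data)
 *	{
 *		// val is non-zero for non-recoverable errors
 *		pr_warn("Octeon cache error (val=%lu)\n", val);
 *		return NOTIFY_OK;	// claim the event; skip the dump below
 *	}
 *
 *	static struct notifier_block my_cache_error_nb = {
 *		.notifier_call = my_cache_error_event,
 *	};
 *
 *	register_co_cache_error_notifier(&my_cache_error_nb);
 */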

static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
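
	/*
	 * Dump the error state to the console unless some notifier
	 * claimed the event (returned NOTIFY_OK or NOTIFY_STOP).
	 */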
	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

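		/*
		 * On the non-recoverable path (val != 0) the dcache
		 * error state was saved earlier by the low-level
		 * exception handler in cache_err_dcache[]; otherwise
		 * read it directly from CP0.
		 */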
		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}