/*
 * Copyright (c) 2024 Andes Technology Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "soc_v5.h"

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/drivers/cache.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(cache_andes, CONFIG_CACHE_LOG_LEVEL);

/* L1 CCTL Command */
#define CCTL_L1D_VA_INVAL	0
#define CCTL_L1D_VA_WB		1
#define CCTL_L1D_VA_WBINVAL	2
#define CCTL_L1D_WBINVAL_ALL	6
#define CCTL_L1D_WB_ALL		7
#define CCTL_L1I_VA_INVAL	8
#define CCTL_L1D_INVAL_ALL	23
#define CCTL_L1I_IX_INVAL	24

/* mcache_ctl bitfield */
#define MCACHE_CTL_IC_EN	BIT(0)
#define MCACHE_CTL_DC_EN	BIT(1)
#define MCACHE_CTL_CCTL_SUEN	BIT(8)
#define MCACHE_CTL_DC_COHEN	BIT(19)
#define MCACHE_CTL_DC_COHSTA	BIT(20)

/* micm_cfg bitfield */
#define MICM_CFG_ISET		BIT_MASK(3)
#define MICM_CFG_IWAY_SHIFT	3
#define MICM_CFG_ISZ_SHIFT	6

/* mdcm_cfg bitfield */
#define MDCM_CFG_DSZ_SHIFT	6

/* mmsc_cfg bitfield */
#define MMSC_CFG_CCTLCSR	BIT(16)
#define MMSC_CFG_VCCTL_2	BIT(19)
#define MMSC_CFG_MSC_EXT	BIT(31)
#define MMSC_CFG_RVARCH		BIT64(52)

/* mmsc_cfg2 bitfield */
#define MMSC_CFG2_RVARCH	BIT(20)

/* mrvarch_cfg bitfield */
#define MRVARCH_CFG_SMEPMP	BIT(4)

#define K_CACHE_WB		BIT(0)
#define K_CACHE_INVD		BIT(1)
#define K_CACHE_WB_INVD		(K_CACHE_WB | K_CACHE_INVD)

struct cache_config {
	uint32_t instr_line_size;
	uint32_t data_line_size;
	uint32_t l2_cache_size;
	uint32_t l2_cache_inclusive;
	bool is_cctl_supported;
};

static struct cache_config cache_cfg;
static struct k_spinlock lock;

#if DT_NODE_HAS_COMPAT_STATUS(DT_INST(0, andestech_l2c), andestech_l2c, okay)
#include "cache_andes_l2.h"
#else
static ALWAYS_INLINE void nds_l2_cache_enable(void) { }
static ALWAYS_INLINE void nds_l2_cache_disable(void) { }
static ALWAYS_INLINE int nds_l2_cache_range(void *addr, size_t size, int op) { return 0; }
static ALWAYS_INLINE int nds_l2_cache_all(int op) { return 0; }
static ALWAYS_INLINE int nds_l2_cache_is_inclusive(void) { return 0; }
static ALWAYS_INLINE int nds_l2_cache_init(uint32_t line_size) { return 0; }
#endif /* DT_NODE_HAS_COMPAT_STATUS(DT_INST(0, andestech_l2c), andestech_l2c, okay) */

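/*
 * Issue a VA-type CCTL command for every cache line that overlaps
 * [addr, addr + size). Depending on the effective privilege mode, the
 * command is written through either the ucctl or the mcctl CSRs.
 */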
static ALWAYS_INLINE int nds_cctl_range_operations(void *addr, size_t size, int line_size, int cmd)
{
	unsigned long last_byte, align_addr;
	unsigned long status = csr_read(mstatus);

	last_byte = (unsigned long)addr + size - 1;
	align_addr = ROUND_DOWN(addr, line_size);

	/*
	 * When the effective memory access privilege is U mode, applications
	 * should use the ucctl CSRs for VA-type commands.
	 */
	if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) {
		while (align_addr <= last_byte) {
			csr_write(NDS_UCCTLBEGINADDR, align_addr);
			csr_write(NDS_UCCTLCOMMAND, cmd);
			align_addr += line_size;
		}
	} else {
		while (align_addr <= last_byte) {
			csr_write(NDS_MCCTLBEGINADDR, align_addr);
			csr_write(NDS_MCCTLCOMMAND, cmd);
			align_addr += line_size;
		}
	}

	return 0;
}

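/*
 * Invalidate the whole L1 I-cache by walking every set and way with the
 * index-type (IX) CCTL invalidate command.
 */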
static ALWAYS_INLINE int nds_l1i_cache_all(int op)
{
	unsigned long sets, ways, end;
	unsigned long status = csr_read(mstatus);

	if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) {
		/*
		 * When the effective memory access privilege is U mode,
		 * applications can only use VA-type commands on a specific
		 * address range.
		 */
		if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) {
			return -ENOTSUP;
		}
	}

	if (op == K_CACHE_INVD) {
		sets = 0x40 << (csr_read(NDS_MICM_CFG) & MICM_CFG_ISET);
		ways = ((csr_read(NDS_MICM_CFG) >> MICM_CFG_IWAY_SHIFT) & BIT_MASK(3)) + 1;
		end = ways * sets * cache_cfg.instr_line_size;

		for (int i = 0; i < end; i += cache_cfg.instr_line_size) {
			csr_write(NDS_MCCTLBEGINADDR, i);
			csr_write(NDS_MCCTLCOMMAND, CCTL_L1I_IX_INVAL);
		}
	}

	return 0;
}

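/*
 * Write back and/or invalidate the whole L1 D-cache with the ALL-type
 * CCTL commands.
 */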
static ALWAYS_INLINE int nds_l1d_cache_all(int op)
{
	unsigned long status = csr_read(mstatus);

	if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) {
		/*
		 * When the effective memory access privilege is U mode,
		 * applications can only use VA-type commands on a specific
		 * address range.
		 */
		if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) {
			return -ENOTSUP;
		}
	}

	switch (op) {
	case K_CACHE_WB:
		csr_write(NDS_MCCTLCOMMAND, CCTL_L1D_WB_ALL);
		break;
	case K_CACHE_INVD:
		csr_write(NDS_MCCTLCOMMAND, CCTL_L1D_INVAL_ALL);
		break;
	case K_CACHE_WB_INVD:
		csr_write(NDS_MCCTLCOMMAND, CCTL_L1D_WBINVAL_ALL);
		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}

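/* Invalidate the L1 I-cache lines covering [addr, addr + size) with VA-type CCTL commands. */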
static ALWAYS_INLINE int nds_l1i_cache_range(void *addr, size_t size, int op)
{
	unsigned long cmd;

	if (op == K_CACHE_INVD) {
		cmd = CCTL_L1I_VA_INVAL;
		nds_cctl_range_operations(addr, size, cache_cfg.instr_line_size, cmd);
	}

	return 0;
}

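/* Write back and/or invalidate the L1 D-cache lines covering [addr, addr + size). */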
static ALWAYS_INLINE int nds_l1d_cache_range(void *addr, size_t size, int op)
{
	unsigned long cmd;

	switch (op) {
	case K_CACHE_WB:
		cmd = CCTL_L1D_VA_WB;
		break;
	case K_CACHE_INVD:
		cmd = CCTL_L1D_VA_INVAL;
		break;
	case K_CACHE_WB_INVD:
		cmd = CCTL_L1D_VA_WBINVAL;
		break;
	default:
		return -ENOTSUP;
	}

	nds_cctl_range_operations(addr, size, cache_cfg.data_line_size, cmd);

	return 0;
}

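/*
 * Enable the D-cache. The L2 cache is enabled first, then D-cache coherence
 * management is turned on (and polled until it takes effect) before the L1
 * D-cache itself is enabled. Skipped on SMP configurations with more than
 * one CPU.
 */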
void cache_data_enable(void)
{
	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
		return;
	}

	K_SPINLOCK(&lock) {
		nds_l2_cache_enable();

		/* Enable D-cache coherence management */
		csr_set(NDS_MCACHE_CTL, MCACHE_CTL_DC_COHEN);

		/* Check whether the CPU supports coherence management (CM) */
		if (csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHEN) {
			/* Wait until enabling of cache coherence has completed */
			while (!(csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHSTA)) {
				;
			}
		}

		/* Enable D-cache */
		csr_set(NDS_MCACHE_CTL, MCACHE_CTL_DC_EN);
	}
}

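/*
 * Disable the D-cache. Dirty data is written back and invalidated first,
 * then the L1 D-cache, coherence management, and the L2 cache are disabled
 * in that order.
 */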
void cache_data_disable(void)
{
	unsigned long status = csr_read(mstatus);

	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
		return;
	}

	if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) {
		if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) {
			if (!cache_cfg.l2_cache_inclusive) {
				return;
			}
		}
	}

	K_SPINLOCK(&lock) {
		if (cache_cfg.l2_cache_inclusive) {
			nds_l2_cache_all(K_CACHE_WB_INVD);
		} else {
			nds_l1d_cache_all(K_CACHE_WB_INVD);
			nds_l2_cache_all(K_CACHE_WB_INVD);
		}

		csr_clear(NDS_MCACHE_CTL, MCACHE_CTL_DC_EN);

		/* Check whether the CPU supports coherence management (CM) */
		if (csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHSTA) {
			csr_clear(NDS_MCACHE_CTL, MCACHE_CTL_DC_COHEN);
			/* Wait until disabling of cache coherence has completed */
			while (csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHSTA) {
				;
			}
		}
		nds_l2_cache_disable();
	}
}

void cache_instr_enable(void)
{
	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
		return;
	}

	csr_set(NDS_MCACHE_CTL, MCACHE_CTL_IC_EN);
}

void cache_instr_disable(void)
{
	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
		return;
	}

	csr_clear(NDS_MCACHE_CTL, MCACHE_CTL_IC_EN);
}

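/*
 * Invalidate the whole D-cache hierarchy. A write-back is issued before the
 * invalidate; with an inclusive L2 cache only the L2 needs to be operated
 * on, otherwise the L1 and L2 caches are handled separately.
 */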
int cache_data_invd_all(void)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	K_SPINLOCK(&lock) {
		if (cache_cfg.l2_cache_inclusive) {
			ret |= nds_l2_cache_all(K_CACHE_WB);
			ret |= nds_l2_cache_all(K_CACHE_INVD);
		} else {
			ret |= nds_l1d_cache_all(K_CACHE_WB);
			ret |= nds_l2_cache_all(K_CACHE_WB);
			ret |= nds_l2_cache_all(K_CACHE_INVD);
			ret |= nds_l1d_cache_all(K_CACHE_INVD);
		}
	}

	return ret;
}

int cache_data_invd_range(void *addr, size_t size)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	K_SPINLOCK(&lock) {
		if (cache_cfg.l2_cache_inclusive) {
			ret |= nds_l2_cache_range(addr, size, K_CACHE_INVD);
		} else {
			ret |= nds_l2_cache_range(addr, size, K_CACHE_INVD);
			ret |= nds_l1d_cache_range(addr, size, K_CACHE_INVD);
		}
	}

	return ret;
}

int cache_instr_invd_all(void)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_RISCV_PMP)) {
		/* CCTL IX-type commands are not supported together with RISC-V Smepmp */
		if (IS_ENABLED(CONFIG_64BIT)) {
			if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_RVARCH) {
				if (csr_read(NDS_MRVARCH_CFG) & MRVARCH_CFG_SMEPMP) {
					return -ENOTSUP;
				}
			}
		} else {
			if ((csr_read(NDS_MMSC_CFG) & MMSC_CFG_MSC_EXT) &&
			    (csr_read(NDS_MMSC_CFG2) & MMSC_CFG2_RVARCH)) {
				if (csr_read(NDS_MRVARCH_CFG) & MRVARCH_CFG_SMEPMP) {
					return -ENOTSUP;
				}
			}
		}
	}

	K_SPINLOCK(&lock) {
		ret |= nds_l1i_cache_all(K_CACHE_INVD);
	}

	return ret;
}

int cache_instr_invd_range(void *addr, size_t size)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
		ARG_UNUSED(addr);
		ARG_UNUSED(size);

		return -ENOTSUP;
	}

	K_SPINLOCK(&lock) {
		ret |= nds_l1i_cache_range(addr, size, K_CACHE_INVD);
	}

	return ret;
}

int cache_data_flush_all(void)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	K_SPINLOCK(&lock) {
		if (cache_cfg.l2_cache_inclusive) {
			ret |= nds_l2_cache_all(K_CACHE_WB);
		} else {
			ret |= nds_l1d_cache_all(K_CACHE_WB);
			ret |= nds_l2_cache_all(K_CACHE_WB);
		}
	}

	return ret;
}

int cache_data_flush_range(void *addr, size_t size)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	K_SPINLOCK(&lock) {
		if (cache_cfg.l2_cache_inclusive) {
			ret |= nds_l2_cache_range(addr, size, K_CACHE_WB);
		} else {
			ret |= nds_l1d_cache_range(addr, size, K_CACHE_WB);
			ret |= nds_l2_cache_range(addr, size, K_CACHE_WB);
		}
	}

	return ret;
}

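/*
 * Flush and invalidate the whole D-cache hierarchy. Without an L2 cache a
 * single L1 write-back-and-invalidate is enough; with a non-inclusive L2
 * cache the L1 is written back first and only invalidated after the L2 has
 * been flushed and invalidated.
 */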
int cache_data_flush_and_invd_all(void)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	K_SPINLOCK(&lock) {
		if (cache_cfg.l2_cache_size) {
			if (cache_cfg.l2_cache_inclusive) {
				ret |= nds_l2_cache_all(K_CACHE_WB_INVD);
			} else {
				ret |= nds_l1d_cache_all(K_CACHE_WB);
				ret |= nds_l2_cache_all(K_CACHE_WB_INVD);
				ret |= nds_l1d_cache_all(K_CACHE_INVD);
			}
		} else {
			ret |= nds_l1d_cache_all(K_CACHE_WB_INVD);
		}
	}

	return ret;
}

int cache_data_flush_and_invd_range(void *addr, size_t size)
{
	unsigned long ret = 0;

	if (!cache_cfg.is_cctl_supported) {
		return -ENOTSUP;
	}

	K_SPINLOCK(&lock) {
		if (cache_cfg.l2_cache_size) {
			if (cache_cfg.l2_cache_inclusive) {
				ret |= nds_l2_cache_range(addr, size, K_CACHE_WB_INVD);
			} else {
				ret |= nds_l1d_cache_range(addr, size, K_CACHE_WB);
				ret |= nds_l2_cache_range(addr, size, K_CACHE_WB_INVD);
				ret |= nds_l1d_cache_range(addr, size, K_CACHE_INVD);
			}
		} else {
			ret |= nds_l1d_cache_range(addr, size, K_CACHE_WB_INVD);
		}
	}

	return ret;
}

int cache_instr_flush_all(void)
{
	return -ENOTSUP;
}

int cache_instr_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

int cache_instr_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

#if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
size_t cache_data_line_size_get(void)
{
	return cache_cfg.data_line_size;
}
#endif /* defined(CONFIG_DCACHE_LINE_SIZE_DETECT) */

#if defined(CONFIG_ICACHE_LINE_SIZE_DETECT)
size_t cache_instr_line_size_get(void)
{
	return cache_cfg.instr_line_size;
}
#endif /* defined(CONFIG_ICACHE_LINE_SIZE_DETECT) */

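/*
 * Probe the cache configuration at boot: determine the I-cache and D-cache
 * line sizes (from micm_cfg/mdcm_cfg, Kconfig, or the devicetree), record
 * whether the CCTL CSRs are available, and initialize the optional L2 cache.
 */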
static int andes_cache_init(void)
{
	unsigned long line_size;

	if (IS_ENABLED(CONFIG_ICACHE)) {
		line_size = (csr_read(NDS_MICM_CFG) >> MICM_CFG_ISZ_SHIFT) & BIT_MASK(3);

		if (line_size == 0) {
			LOG_ERR("Platform doesn't support I-cache, "
				"please disable CONFIG_ICACHE");
		}
#if defined(CONFIG_ICACHE_LINE_SIZE_DETECT)
		/* I-cache line size */
		if (line_size <= 5) {
			cache_cfg.instr_line_size = 1 << (line_size + 2);
		} else {
			LOG_ERR("Unknown line size of I-cache");
		}
#elif (CONFIG_ICACHE_LINE_SIZE != 0)
		cache_cfg.instr_line_size = CONFIG_ICACHE_LINE_SIZE;
#elif DT_NODE_HAS_PROP(DT_PATH(cpus, cpu_0), i_cache_line_size)
		cache_cfg.instr_line_size =
			DT_PROP(DT_PATH(cpus, cpu_0), i_cache_line_size);
#else
		LOG_ERR("Please specify the i-cache-line-size "
			"property of CPU0 in the devicetree");
#endif /* defined(CONFIG_ICACHE_LINE_SIZE_DETECT) */
	}

	if (IS_ENABLED(CONFIG_DCACHE)) {
		line_size = (csr_read(NDS_MDCM_CFG) >> MDCM_CFG_DSZ_SHIFT) & BIT_MASK(3);
		if (line_size == 0) {
			LOG_ERR("Platform doesn't support D-cache, "
				"please disable CONFIG_DCACHE");
		}
#if defined(CONFIG_DCACHE_LINE_SIZE_DETECT)
		/* D-cache line size */
		if (line_size <= 5) {
			cache_cfg.data_line_size = 1 << (line_size + 2);
		} else {
			LOG_ERR("Unknown line size of D-cache");
		}
#elif (CONFIG_DCACHE_LINE_SIZE != 0)
		cache_cfg.data_line_size = CONFIG_DCACHE_LINE_SIZE;
#elif DT_NODE_HAS_PROP(DT_PATH(cpus, cpu_0), d_cache_line_size)
		cache_cfg.data_line_size =
			DT_PROP(DT_PATH(cpus, cpu_0), d_cache_line_size);
#else
		LOG_ERR("Please specify the d-cache-line-size "
			"property of CPU0 in the devicetree");
#endif /* defined(CONFIG_DCACHE_LINE_SIZE_DETECT) */
	}

	if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_CCTLCSR) {
		cache_cfg.is_cctl_supported = true;
	}

	if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) {
		if (IS_ENABLED(CONFIG_PMP_STACK_GUARD)) {
			csr_set(NDS_MCACHE_CTL, MCACHE_CTL_CCTL_SUEN);
		}
	}

	cache_cfg.l2_cache_size = nds_l2_cache_init(cache_cfg.data_line_size);
	cache_cfg.l2_cache_inclusive = nds_l2_cache_is_inclusive();

	return 0;
}

SYS_INIT(andes_cache_init, PRE_KERNEL_1, CONFIG_CACHE_ANDES_INIT_PRIORITY);