1 /*
2  * Copyright (c) 2024 Andes Technology Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #ifndef ZEPHYR_DRIVERS_CACHE_CACHE_ANDES_L2_H_
8 #define ZEPHYR_DRIVERS_CACHE_CACHE_ANDES_L2_H_
9 
10 #include <zephyr/kernel.h>
11 #include <zephyr/arch/cpu.h>
12 #include <zephyr/drivers/syscon.h>
13 
#define L2C_BASE		DT_REG_ADDR_BY_IDX(DT_INST(0, andestech_l2c), 0)

/* L2 cache Register Offset */
#define L2C_CONFIG		(L2C_BASE + 0x00)
#define L2C_CTRL		(L2C_BASE + 0x08)
/*
 * Per-hart CCTL command/access/status registers. The per-hart stride
 * (cmd_offset / status_offset) depends on the register map flavour reported
 * by L2C_CONFIG_MAP and is filled in by nds_l2_cache_init().
 */
#define L2C_CCTLCMD(hart_id)	\
	(L2C_BASE + 0x40 + (hart_id * l2_cache_cfg.cmd_offset))
#define L2C_CCTLACC(hart_id)	\
	(L2C_BASE + 0x48 + (hart_id * l2_cache_cfg.cmd_offset))
#define L2C_CCTLST(hart_id)	\
	(L2C_BASE + 0x80 + (hart_id * l2_cache_cfg.status_offset))

/* L2 cache config registers bitfields */
#define L2C_CONFIG_SIZE_SHIFT		7
#define L2C_CONFIG_MAP			BIT(20)
#define L2C_CONFIG_VERSION_SHIFT	24

/* L2 cache control registers bitfields */
#define L2C_CTRL_CEN		BIT(0)		/* Cache enable */
#define L2C_CTRL_IPFDPT_3	GENMASK(4, 3)	/* Instruction prefetch depth */
#define L2C_CTRL_DPFDPT_8	GENMASK(7, 6)	/* Data prefetch depth */

/* L2 cache CCTL Access Line registers bitfields */
#define L2C_CCTLACC_WAY_SHIFT	28

/* L2 CCTL Command */
#define CCTL_L2_IX_INVAL	0x00	/* Invalidate one line by index */
#define CCTL_L2_IX_WB		0x01	/* Write back one line by index */
#define CCTL_L2_PA_INVAL	0x08	/* Invalidate one line by physical address */
#define CCTL_L2_PA_WB		0x09	/* Write back one line by physical address */
#define CCTL_L2_PA_WBINVAL	0x0a	/* Write back + invalidate one line by PA */
#define CCTL_L2_WBINVAL_ALL	0x12	/* Write back + invalidate the whole cache */

/* Cache operation selectors consumed by the helpers below */
#define K_CACHE_WB		BIT(0)
#define K_CACHE_INVD		BIT(1)
#define K_CACHE_WB_INVD		(K_CACHE_WB | K_CACHE_INVD)

/* Runtime description of the L2 cache, probed in nds_l2_cache_init() */
struct nds_l2_cache_config {
	uint32_t size;		/* Total cache size in bytes (0 = no L2 cache) */
	uint32_t cmd_offset;	/* Per-hart stride of CCTL command/access regs */
	uint32_t status_offset;	/* Per-hart stride of CCTL status regs */
	uint16_t status_shift;	/* Per-hart bit shift inside a shared status reg */
	uint8_t version;	/* L2C IP version field from L2C_CONFIG */
	uint8_t line_size;	/* Cache line size in bytes */
};

static struct nds_l2_cache_config l2_cache_cfg;
61 
nds_l2_cache_is_inclusive(void)62 static ALWAYS_INLINE int nds_l2_cache_is_inclusive(void)
63 {
64 	return IS_ENABLED(CONFIG_L2C_INCLUSIVE_POLICY) &&
65 			(l2_cache_cfg.version > 15);
66 
67 }
68 
nds_l2_cache_wait_status(uint8_t hart_id)69 static ALWAYS_INLINE void nds_l2_cache_wait_status(uint8_t hart_id)
70 {
71 	uint32_t status;
72 
73 	do {
74 		status = sys_read32(L2C_CCTLST(hart_id));
75 		status >>= hart_id * l2_cache_cfg.status_shift;
76 		status &= BIT_MASK(4);
77 	} while (status == 1);
78 }
79 
/*
 * Perform a maintenance operation on the entire L2 cache.
 *
 * @param op One of K_CACHE_WB, K_CACHE_INVD or K_CACHE_WB_INVD.
 * @return 0 on success, -ENOTSUP when no L2 cache is present, when @op is
 *         unknown, or when a user-context request cannot be serviced.
 */
static ALWAYS_INLINE int nds_l2_cache_all(int op)
{
	unsigned long ways, sets, index, cmd;
	uint8_t hart_id;
	unsigned long status = csr_read(mstatus);

	/* Way count follows the cache size: parts of 128 KiB+ are 16-way */
	if (!l2_cache_cfg.size) {
		return -ENOTSUP;
	} else if (l2_cache_cfg.size >= 128 * 1024) {
		ways = 16;
	} else {
		ways = 8;
	}

	/*
	 * With VCCTL version 2, a request made on behalf of user mode
	 * (MPRV set, MPP = user) is only serviceable when the L2 is
	 * inclusive of the L1 caches.
	 */
	if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) {
		if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) {
			if (!nds_l2_cache_is_inclusive()) {
				return -ENOTSUP;
			}
		}
	}

	switch (op) {
	case K_CACHE_WB:
		cmd = CCTL_L2_IX_WB;
		break;
	case K_CACHE_INVD:
		cmd = CCTL_L2_IX_INVAL;
		break;
	case K_CACHE_WB_INVD:
		cmd = CCTL_L2_WBINVAL_ALL;
		break;
	default:
		return -ENOTSUP;
	}

	hart_id = arch_proc_id();

	if (op == K_CACHE_WB_INVD) {
		/* A single CCTL command flushes and invalidates everything */
		sys_write32(CCTL_L2_WBINVAL_ALL, L2C_CCTLCMD(hart_id));

		/* Wait L2 CCTL Commands finished */
		nds_l2_cache_wait_status(hart_id);
	} else {
		sets = l2_cache_cfg.size / (ways * l2_cache_cfg.line_size);
		/* Invalidate all cache line by each way and each set */
		for (int j = 0; j < ways; j++) {
			/* Index of way */
			index = j << L2C_CCTLACC_WAY_SHIFT;
			for (int i = 0; i < sets; i++) {
				/*
				 * Index of set.
				 *
				 * NOTE(review): index is advanced BEFORE the
				 * first command, so byte offset 0 of each way
				 * is never written to L2C_CCTLACC directly;
				 * the final iteration issues offset
				 * sets * line_size instead. This is only
				 * correct if the hardware wraps the set field
				 * modulo the way size — confirm against the
				 * Andes L2C CCTL specification.
				 */
				index += l2_cache_cfg.line_size;

				/* Invalidate each cache line */
				sys_write32(index, L2C_CCTLACC(hart_id));
				sys_write32(cmd, L2C_CCTLCMD(hart_id));

				/* Wait L2 CCTL Commands finished */
				nds_l2_cache_wait_status(hart_id);
			}
		}
	}

	return 0;
}
145 
nds_l2_cache_range(void * addr,size_t size,int op)146 static ALWAYS_INLINE int nds_l2_cache_range(void *addr, size_t size, int op)
147 {
148 	unsigned long last_byte, align_addr, cmd;
149 	uint8_t hart_id;
150 
151 	if (!l2_cache_cfg.size) {
152 		return -ENOTSUP;
153 	}
154 
155 	switch (op) {
156 	case K_CACHE_WB:
157 		cmd = CCTL_L2_PA_WB;
158 		break;
159 	case K_CACHE_INVD:
160 		cmd = CCTL_L2_PA_INVAL;
161 		break;
162 	case K_CACHE_WB_INVD:
163 		cmd = CCTL_L2_PA_WBINVAL;
164 		break;
165 	default:
166 		return -ENOTSUP;
167 	}
168 
169 	last_byte = (unsigned long)addr + size - 1;
170 	align_addr = ROUND_DOWN(addr, l2_cache_cfg.line_size);
171 	hart_id = arch_proc_id();
172 
173 	while (align_addr <= last_byte) {
174 		sys_write32(align_addr, L2C_CCTLACC(hart_id));
175 		sys_write32(cmd, L2C_CCTLCMD(hart_id));
176 		align_addr += l2_cache_cfg.line_size;
177 
178 		/* Wait L2 CCTL Commands finished */
179 		nds_l2_cache_wait_status(hart_id);
180 	}
181 
182 	return 0;
183 }
184 
nds_l2_cache_enable(void)185 static ALWAYS_INLINE void nds_l2_cache_enable(void)
186 {
187 	if (l2_cache_cfg.size) {
188 		uint32_t l2c_ctrl = sys_read32(L2C_CTRL);
189 
190 		if (!(l2c_ctrl & L2C_CTRL_CEN)) {
191 			WRITE_BIT(l2c_ctrl, 0, true);
192 			sys_write32(l2c_ctrl, L2C_CTRL);
193 		}
194 	}
195 }
196 
nds_l2_cache_disable(void)197 static ALWAYS_INLINE void nds_l2_cache_disable(void)
198 {
199 	if (l2_cache_cfg.size) {
200 		uint32_t l2c_ctrl = sys_read32(L2C_CTRL);
201 
202 		if (l2c_ctrl & L2C_CTRL_CEN) {
203 			WRITE_BIT(l2c_ctrl, 0, false);
204 			sys_write32(l2c_ctrl, L2C_CTRL);
205 		}
206 	}
207 }
208 
/*
 * Probe the L2 cache controller, record its geometry in l2_cache_cfg and
 * program the instruction/data prefetch depth. On SMP builds the cache is
 * also enabled here.
 *
 * @param line_size Cache line size in bytes, supplied by the caller.
 * @return Detected L2 cache size in bytes (0 when no L2 cache is present
 *         or the syscon device is not ready).
 */
static ALWAYS_INLINE int nds_l2_cache_init(uint8_t line_size)
{
	unsigned long size;
	uint32_t l2c_ctrl;

#if defined(CONFIG_SYSCON)
#if DT_NODE_HAS_COMPAT_STATUS(DT_NODELABEL(syscon), andestech_atcsmu100, okay)
	uint32_t system_cfg;
	const struct device *syscon_dev = DEVICE_DT_GET(DT_NODELABEL(syscon));

	if (device_is_ready(syscon_dev)) {
		/* Check L2 cache feature from SMU */
		syscon_read_reg(syscon_dev, 0x08, &system_cfg);

		/* Platform doesn't support L2 cache controller */
		if (!(system_cfg & BIT(8))) {
			l2_cache_cfg.size = 0;
			return 0;
		}
	} else {
		LOG_ERR("Andes cache driver should be initialized after "
			"syscon driver initialization");
		return 0;
	}
#endif /* andestech_atcsmu100 dts node status okay */
#endif /* defined(CONFIG_SYSCON) */

	l2_cache_cfg.line_size = line_size;

	/* L2C_CONFIG reports the cache size in units of 128 KiB */
	size = (sys_read32(L2C_CONFIG) >> L2C_CONFIG_SIZE_SHIFT) & BIT_MASK(7);
	l2_cache_cfg.size = size * 128 * 1024;

	/*
	 * The MAP bit selects the register-map flavour: either compact
	 * per-hart command registers sharing one status register, or a
	 * 4 KiB-strided map with per-hart status registers.
	 */
	if (sys_read32(L2C_CONFIG) & L2C_CONFIG_MAP) {
		l2_cache_cfg.cmd_offset = 0x10;
		l2_cache_cfg.status_offset = 0;
		l2_cache_cfg.status_shift = 4;
	} else {
		l2_cache_cfg.cmd_offset = 0x1000;
		l2_cache_cfg.status_offset = 0x1000;
		l2_cache_cfg.status_shift = 0;
	}

	l2_cache_cfg.version = (sys_read32(L2C_CONFIG) >> L2C_CONFIG_VERSION_SHIFT) & BIT_MASK(8);

	/* Initializing L2 cache instruction, data prefetch depth */
	l2c_ctrl = sys_read32(L2C_CTRL);
	l2c_ctrl |= (L2C_CTRL_IPFDPT_3 | L2C_CTRL_DPFDPT_8);

	/*
	 * Writeback and invalidate all I/D-Cache before setting L2C.
	 * NOTE(review): plain RISC-V fence.i is an instruction-fetch fence
	 * only; the write-back side effect presumably relies on Andes-specific
	 * cache-control behavior — confirm against the CPU manual.
	 */
	__asm__ volatile ("fence.i");
	sys_write32(l2c_ctrl, L2C_CTRL);

	/* On SMP, enable the L2 cache right away if it is present */
	if (IS_ENABLED(CONFIG_SMP)) {
		if (l2_cache_cfg.size) {
			l2c_ctrl = sys_read32(L2C_CTRL);

			if (!(l2c_ctrl & L2C_CTRL_CEN)) {
				WRITE_BIT(l2c_ctrl, 0, true);
				sys_write32(l2c_ctrl, L2C_CTRL);
			}
		}
	}

	return l2_cache_cfg.size;
}
274 
275 #endif /* ZEPHYR_DRIVERS_CACHE_CACHE_ANDES_L2_H_ */
276