/*
 * Copyright (c) 2022 ASPEED Technology Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/drivers/syscon.h>
#include <zephyr/sys/barrier.h>

/*
 * cache area control: each bit controls 32KB cache area
 *	1: cacheable
 *	0: no-cache
 *
 *	bit[0]: 1st 32KB from 0x0000_0000 to 0x0000_7fff
 *	bit[1]: 2nd 32KB from 0x0000_8000 to 0x0000_ffff
 *	...
 *	bit[22]: 23rd 32KB from 0x000a_8000 to 0x000a_ffff
 *	bit[23]: 24th 32KB from 0x000b_0000 to 0x000b_ffff
 */
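/*
 * For example, the area bit covering a byte address is (address >> 15):
 * address 0x0002_4000 lies in area 0x24000 >> 15 = 4, i.e. bit[4]
 * (0x0002_0000 to 0x0002_7fff).
 */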
#define CACHE_AREA_CTRL_REG	0xa50
#define CACHE_INVALID_REG	0xa54
#define CACHE_FUNC_CTRL_REG	0xa58

#define CACHED_SRAM_ADDR	CONFIG_SRAM_BASE_ADDRESS
#define CACHED_SRAM_SIZE	KB(CONFIG_SRAM_SIZE)
#define CACHED_SRAM_END		(CACHED_SRAM_ADDR + CACHED_SRAM_SIZE - 1)

#define CACHE_AREA_SIZE_LOG2	15
#define CACHE_AREA_SIZE		(1 << CACHE_AREA_SIZE_LOG2)

#define DCACHE_INVALID(addr)	(BIT(31) | ((addr & GENMASK(10, 0)) << 16))
#define ICACHE_INVALID(addr)	(BIT(15) | ((addr & GENMASK(10, 0)) << 0))
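/*
 * The encodings above are written to CACHE_INVALID_REG one line at a time
 * (see the *_invd_range() helpers below): BIT(31) for D-cache, or BIT(15)
 * for I-cache, presumably acts as the trigger, while the masked address
 * bits select the line to invalidate. The register is written with 0 first,
 * then with the encoded value.
 */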

#define ICACHE_CLEAN		BIT(2)
#define DCACHE_CLEAN		BIT(1)
#define CACHE_ENABLE		BIT(0)

/* cache size = 32B * 128 = 4KB */
#define CACHE_LINE_SIZE_LOG2	5
#define CACHE_LINE_SIZE		(1 << CACHE_LINE_SIZE_LOG2)
#define N_CACHE_LINE		128
#define CACHE_ALIGNED_ADDR(addr) \
	((addr >> CACHE_LINE_SIZE_LOG2) << CACHE_LINE_SIZE_LOG2)
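/* e.g. CACHE_ALIGNED_ADDR(0x104) == 0x100 with the 32-byte line size above */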

/* prefetch buffer */
#define PREFETCH_BUF_SIZE	CACHE_LINE_SIZE

static void aspeed_cache_init(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));
	uint32_t start_bit, end_bit, max_bit;

	/* set all cache areas to no-cache by default */
	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, 0);

	/* calculate how many areas need to be set */
	max_bit = 8 * sizeof(uint32_t) - 1;
	start_bit = MIN(max_bit, CACHED_SRAM_ADDR >> CACHE_AREA_SIZE_LOG2);
	end_bit = MIN(max_bit, CACHED_SRAM_END >> CACHE_AREA_SIZE_LOG2);
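	/*
	 * For example, assuming a hypothetical 768KB SRAM at address 0
	 * (CONFIG_SRAM_BASE_ADDRESS = 0x0, CONFIG_SRAM_SIZE = 768):
	 * start_bit = 0 and end_bit = 0xbffff >> 15 = 23, so the write below
	 * programs GENMASK(23, 0) = 0x00ffffff, marking all 24 areas cacheable.
	 */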
	syscon_write_reg(dev, CACHE_AREA_CTRL_REG, GENMASK(end_bit, start_bit));

	/* enable cache */
	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, CACHE_ENABLE);
}

/**
 * @brief get the aligned address and the number of cachelines to be invalidated
 * @param [IN] addr - start address to be invalidated
 * @param [IN] size - size in bytes
 * @param [OUT] p_head - pointer to the cacheline-aligned head address variable
 * @return number of cachelines to be invalidated
 *
 *  * addr
 *   |--------size-------------|
 * |-----|-----|-----|-----|-----|
 *  \                             \
 *   head                          tail
 *
 * example 1:
 * addr = 0x100 (cacheline aligned), size = 64
 * then head = 0x100, number of cache lines to be invalidated = 64 / 32 = 2
 * which means range [0x100, 0x140) will be invalidated
 *
 * example 2:
 * addr = 0x104 (cacheline unaligned), size = 64
 * then head = 0x100, number of cache lines to be invalidated = 1 + 64 / 32 = 3
 * which means range [0x100, 0x160) will be invalidated
 */
static uint32_t get_n_cacheline(uint32_t addr, uint32_t size, uint32_t *p_head)
{
	uint32_t n = 0;
	uint32_t tail;

	/* head */
	*p_head = CACHE_ALIGNED_ADDR(addr);

	/* round up the tail address */
	tail = addr + size + (CACHE_LINE_SIZE - 1);
	tail = CACHE_ALIGNED_ADDR(tail);

	n = (tail - *p_head) >> CACHE_LINE_SIZE_LOG2;

	return n;
}

void cache_data_enable(void)
{
	aspeed_cache_init();
}

void cache_data_disable(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));

	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, 0);
}

void cache_instr_enable(void)
{
	aspeed_cache_init();
}

void cache_instr_disable(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));

	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, 0);
}

int cache_data_invd_all(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));
	uint32_t ctrl;
	unsigned int key = 0;

	syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl);

	/* enter critical section */
	if (!k_is_in_isr()) {
		key = irq_lock();
	}

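	/*
	 * Pulsing DCACHE_CLEAN low and then high again, with a data barrier
	 * in between, presumably triggers a full D-cache invalidation in
	 * hardware (assumption based on the register sequence below).
	 */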
	ctrl &= ~DCACHE_CLEAN;
	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl);

	barrier_dsync_fence_full();
	ctrl |= DCACHE_CLEAN;
	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl);
	barrier_dsync_fence_full();

	/* exit critical section */
	if (!k_is_in_isr()) {
		irq_unlock(key);
	}

	return 0;
}

int cache_data_invd_range(void *addr, size_t size)
{
	uint32_t aligned_addr, i, n;
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));
	unsigned int key = 0;

	if (((uint32_t)addr < CACHED_SRAM_ADDR) ||
	    ((uint32_t)addr > CACHED_SRAM_END)) {
		return 0;
	}

	/* enter critical section */
	if (!k_is_in_isr()) {
		key = irq_lock();
	}

	n = get_n_cacheline((uint32_t)addr, size, &aligned_addr);

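	/*
	 * Invalidate one line at a time: clear CACHE_INVALID_REG, then write
	 * the encoded line address with the D-cache trigger bit set.
	 */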
	for (i = 0; i < n; i++) {
		syscon_write_reg(dev, CACHE_INVALID_REG, 0);
		syscon_write_reg(dev, CACHE_INVALID_REG, DCACHE_INVALID(aligned_addr));
		aligned_addr += CACHE_LINE_SIZE;
	}
	barrier_dsync_fence_full();

	/* exit critical section */
	if (!k_is_in_isr()) {
		irq_unlock(key);
	}

	return 0;
}

int cache_instr_invd_all(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));
	uint32_t ctrl;
	unsigned int key = 0;

	syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl);

	/* enter critical section */
	if (!k_is_in_isr()) {
		key = irq_lock();
	}

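	/*
	 * Pulsing ICACHE_CLEAN low and then high again, with an instruction
	 * barrier in between, presumably triggers a full I-cache invalidation
	 * in hardware (assumption based on the register sequence below).
	 */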
	ctrl &= ~ICACHE_CLEAN;
	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl);
	barrier_isync_fence_full();
	ctrl |= ICACHE_CLEAN;
	syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl);
	barrier_isync_fence_full();

	/* exit critical section */
	if (!k_is_in_isr()) {
		irq_unlock(key);
	}

	return 0;
}

int cache_instr_invd_range(void *addr, size_t size)
{
	uint32_t aligned_addr, i, n;
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));
	unsigned int key = 0;

	if (((uint32_t)addr < CACHED_SRAM_ADDR) ||
	    ((uint32_t)addr > CACHED_SRAM_END)) {
		return 0;
	}

	n = get_n_cacheline((uint32_t)addr, size, &aligned_addr);

	/* enter critical section */
	if (!k_is_in_isr()) {
		key = irq_lock();
	}

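	/*
	 * Invalidate one line at a time: clear CACHE_INVALID_REG, then write
	 * the encoded line address with the I-cache trigger bit set.
	 */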
	for (i = 0; i < n; i++) {
		syscon_write_reg(dev, CACHE_INVALID_REG, 0);
		syscon_write_reg(dev, CACHE_INVALID_REG, ICACHE_INVALID(aligned_addr));
		aligned_addr += CACHE_LINE_SIZE;
	}
	barrier_dsync_fence_full();

	/* exit critical section */
	if (!k_is_in_isr()) {
		irq_unlock(key);
	}

	return 0;
}

int cache_data_flush_all(void)
{
	return -ENOTSUP;
}

int cache_data_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

int cache_data_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

int cache_data_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

int cache_instr_flush_all(void)
{
	return -ENOTSUP;
}

int cache_instr_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

int cache_instr_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

#ifdef CONFIG_DCACHE_LINE_SIZE_DETECT
size_t cache_data_line_size_get(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));
	uint32_t ctrl;

	syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl);

	return (ctrl & CACHE_ENABLE) ? CACHE_LINE_SIZE : 0;
}
#endif /* CONFIG_DCACHE_LINE_SIZE_DETECT */

#ifdef CONFIG_ICACHE_LINE_SIZE_DETECT
size_t cache_instr_line_size_get(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon));
	uint32_t ctrl;

	syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl);

	return (ctrl & CACHE_ENABLE) ? CACHE_LINE_SIZE : 0;
}
#endif /* CONFIG_ICACHE_LINE_SIZE_DETECT */