/*
 * Copyright 2022 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_

#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/arch/cpu.h>
#include <errno.h>

#ifdef __cplusplus
extern "C" {
#endif

#define K_CACHE_WB		BIT(0)
#define K_CACHE_INVD		BIT(1)
#define K_CACHE_WB_INVD		(K_CACHE_WB | K_CACHE_INVD)

#if defined(CONFIG_DCACHE)

/* ctr_el0 */
#define	CTR_EL0_DMINLINE_SHIFT		16
#define	CTR_EL0_DMINLINE_MASK		BIT_MASK(4)
#define	CTR_EL0_CWG_SHIFT		24
#define	CTR_EL0_CWG_MASK		BIT_MASK(4)
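
/*
 * Note: CTR_EL0.DminLine encodes the log2 of the smallest implemented
 * D-cache line size in 4-byte words, so the line size in bytes is
 * 4 << DminLine (see arch_dcache_line_size_get() below). CTR_EL0.CWG is
 * the cache writeback granule; it is defined here but not used in this
 * header.
 */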

/* clidr_el1 */
#define CLIDR_EL1_LOC_SHIFT		24
#define CLIDR_EL1_LOC_MASK		BIT_MASK(3)
#define CLIDR_EL1_CTYPE_SHIFT(level)	((level) * 3)
#define CLIDR_EL1_CTYPE_MASK		BIT_MASK(3)

/* ccsidr_el1 */
#define CCSIDR_EL1_LN_SZ_SHIFT		0
#define CCSIDR_EL1_LN_SZ_MASK		BIT_MASK(3)
#define CCSIDR_EL1_WAYS_SHIFT		3
#define CCSIDR_EL1_WAYS_MASK		BIT_MASK(10)
#define CCSIDR_EL1_SETS_SHIFT		13
#define CCSIDR_EL1_SETS_MASK		BIT_MASK(15)
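
/*
 * Illustrative sketch (not part of this header's API): decoding the
 * geometry of a given cache level from CLIDR_EL1/CCSIDR_EL1 with the
 * masks above. Assumes CSSELR_EL1 selects the level (InD = 0 for a
 * data/unified cache) before CCSIDR_EL1 is read; `level` is hypothetical.
 *
 *	uint64_t clidr = read_sysreg(clidr_el1);
 *	uint32_t ctype = (clidr >> CLIDR_EL1_CTYPE_SHIFT(level)) &
 *			 CLIDR_EL1_CTYPE_MASK;	// 0: no cache at this level
 *
 *	write_sysreg(level << 1, csselr_el1);	// select data/unified cache
 *	barrier_isync_fence_full();
 *	uint64_t ccsidr = read_sysreg(ccsidr_el1);
 *	size_t line = 1 << (((ccsidr >> CCSIDR_EL1_LN_SZ_SHIFT) &
 *			     CCSIDR_EL1_LN_SZ_MASK) + 4);	// bytes
 *	size_t ways = ((ccsidr >> CCSIDR_EL1_WAYS_SHIFT) &
 *		       CCSIDR_EL1_WAYS_MASK) + 1;
 *	size_t sets = ((ccsidr >> CCSIDR_EL1_SETS_SHIFT) &
 *		       CCSIDR_EL1_SETS_MASK) + 1;
 */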

#define dc_ops(op, val)							\
({									\
	__asm__ volatile ("dc " op ", %0" :: "r" (val) : "memory");	\
})
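
/*
 * Example: dc_ops("civac", addr) emits "dc civac, <addr>", cleaning and
 * invalidating the data cache line containing addr by VA to the Point of
 * Coherency. The "memory" clobber keeps the compiler from reordering
 * memory accesses around the maintenance instruction.
 */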

/* Cached line size, discovered once from CTR_EL0 on first use */
static size_t dcache_line_size;

static ALWAYS_INLINE size_t arch_dcache_line_size_get(void)
{
	uint64_t ctr_el0;
	uint32_t dminline;

	if (dcache_line_size) {
		return dcache_line_size;
	}

	ctr_el0 = read_sysreg(CTR_EL0);

	dminline = (ctr_el0 >> CTR_EL0_DMINLINE_SHIFT) & CTR_EL0_DMINLINE_MASK;

	/* DminLine is the log2 of the line size in 4-byte words */
	dcache_line_size = 4 << dminline;

	return dcache_line_size;
}
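
/*
 * Illustrative use (hypothetical buffer `buf`): rounding a buffer size up
 * to a whole number of cache lines, so range operations never touch a
 * partial line shared with neighbouring data.
 *
 *	size_t line = arch_dcache_line_size_get();
 *	size_t aligned_size = ROUND_UP(sizeof(buf), line);
 */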

/*
 * Operation on the data cache by virtual address to the PoC.
 * op:	K_CACHE_INVD: invalidate
 *	K_CACHE_WB: clean
 *	K_CACHE_WB_INVD: clean and invalidate
 */
static ALWAYS_INLINE int arm64_dcache_range(void *addr, size_t size, int op)
{
	size_t line_size;
	uintptr_t start_addr = (uintptr_t)addr;
	uintptr_t end_addr = start_addr + size;

	if (op != K_CACHE_INVD && op != K_CACHE_WB && op != K_CACHE_WB_INVD) {
		return -ENOTSUP;
	}

	line_size = arch_dcache_line_size_get();

	/*
	 * For the invalidate operation, clean and invalidate the partial
	 * cache lines at both ends of the given range to prevent data
	 * corruption.
	 *
	 * For example (assume the cache line size is 64 bytes):
	 * There are two consecutive 32-byte buffers, which can share one
	 * cache line as below.
	 *			+------------------+------------------+
	 *	 Cache line:	| buffer 0 (dirty) |     buffer 1     |
	 *			+------------------+------------------+
	 * If the start address is not aligned, invalidating buffer 1
	 * invalidates the full cache line, and if buffer 0 is dirty its
	 * data is lost. The same logic applies to an unaligned end
	 * address.
	 */
	if (op == K_CACHE_INVD) {
		if (end_addr & (line_size - 1)) {
			end_addr &= ~(line_size - 1);
			dc_ops("civac", end_addr);
		}

		if (start_addr & (line_size - 1)) {
			start_addr &= ~(line_size - 1);
			if (start_addr == end_addr) {
				goto done;
			}
			dc_ops("civac", start_addr);
			start_addr += line_size;
		}
	}

	/* Align the start address to the line size */
	start_addr &= ~(line_size - 1);

	while (start_addr < end_addr) {
		if (op == K_CACHE_INVD) {
			dc_ops("ivac", start_addr);
		} else if (op == K_CACHE_WB) {
			dc_ops("cvac", start_addr);
		} else if (op == K_CACHE_WB_INVD) {
			dc_ops("civac", start_addr);
		}

		start_addr += line_size;
	}

done:
	barrier_dsync_fence_full();

	return 0;
}

/* Full-cache maintenance by set/way is not implemented; use the range ops */
static ALWAYS_INLINE int arch_dcache_flush_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_dcache_invd_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t size)
{
	return arm64_dcache_range(addr, size, K_CACHE_WB);
}

static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t size)
{
	return arm64_dcache_range(addr, size, K_CACHE_INVD);
}

static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t size)
{
	return arm64_dcache_range(addr, size, K_CACHE_WB_INVD);
}
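
/*
 * Illustrative sketch (hypothetical dma_buf and dma_start_transfer()):
 * the usual pattern for a buffer shared with a non-coherent DMA master,
 * built on the range operations above.
 *
 *	// CPU -> device: clean, so the device sees the CPU's writes
 *	arch_dcache_flush_range(dma_buf, sizeof(dma_buf));
 *	dma_start_transfer(dma_buf, sizeof(dma_buf));
 *
 *	// device -> CPU: invalidate, so the CPU re-reads fresh data
 *	arch_dcache_invd_range(dma_buf, sizeof(dma_buf));
 */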

/* Caches are configured during early boot (MMU setup), so these are no-ops */
static ALWAYS_INLINE void arch_dcache_enable(void)
{
	/* nothing */
}

static ALWAYS_INLINE void arch_dcache_disable(void)
{
	/* nothing */
}

#endif /* CONFIG_DCACHE */

#if defined(CONFIG_ICACHE)

/* I-cache maintenance is not implemented; all operations report -ENOTSUP */
static ALWAYS_INLINE size_t arch_icache_line_size_get(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_invd_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}

static ALWAYS_INLINE int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
	ARG_UNUSED(addr);
	ARG_UNUSED(size);
	return -ENOTSUP;
}
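
/*
 * For reference only (not implemented here): a VA-range I-cache
 * invalidate on AArch64 would loop over the range issuing "ic ivau"
 * (invalidate by VA to the Point of Unification), then synchronize, e.g.:
 *
 *	for (uintptr_t va = start; va < end; va += line_size) {
 *		__asm__ volatile ("ic ivau, %0" :: "r" (va) : "memory");
 *	}
 *	barrier_dsync_fence_full();
 *	barrier_isync_fence_full();
 *
 * This is only a sketch of the architectural sequence; `start`, `end` and
 * `line_size` are hypothetical.
 */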

static ALWAYS_INLINE void arch_icache_enable(void)
{
	/* nothing */
}

static ALWAYS_INLINE void arch_icache_disable(void)
{
	/* nothing */
}

#endif /* CONFIG_ICACHE */

static ALWAYS_INLINE void arch_cache_init(void)
{
	/* nothing */
}

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_CACHE_H_ */