/*
 * Copyright 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_
#define ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_

#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#include <zephyr/debug/sparse.h>
#include <xtensa/hal.h>

#ifdef __cplusplus
extern "C" {
#endif

#define Z_DCACHE_MAX (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)

#if XCHAL_DCACHE_SIZE
BUILD_ASSERT(Z_IS_POW2(XCHAL_DCACHE_LINESIZE));
BUILD_ASSERT(Z_IS_POW2(Z_DCACHE_MAX));
#endif

#if defined(CONFIG_DCACHE) || defined(__DOXYGEN__)

/** Implementation of @ref arch_dcache_flush_range. */
static ALWAYS_INLINE int arch_dcache_flush_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhwb %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

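/*
 * Illustrative usage sketch (not part of this header's API): before a
 * device or other bus master reads a buffer the CPU has just written,
 * the dirty lines covering that buffer must be written back to RAM.
 * The names dma_start_tx() and tx_buf below are hypothetical.
 *
 *   memset(tx_buf, 0xaa, sizeof(tx_buf));
 *   arch_dcache_flush_range(tx_buf, sizeof(tx_buf));
 *   dma_start_tx(tx_buf, sizeof(tx_buf));
 */
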
/** Implementation of @ref arch_dcache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhwbi %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

/** Implementation of @ref arch_dcache_invd_range. */
static ALWAYS_INLINE int arch_dcache_invd_range(void *addr, size_t bytes)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t first = ROUND_DOWN(addr, step);
	size_t last = ROUND_UP(((long)addr) + bytes, step);
	size_t line;

	for (line = first; bytes && line < last; line += step) {
		__asm__ volatile("dhi %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

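/*
 * Illustrative usage sketch (hypothetical names): before the CPU reads
 * data a device has written to memory (for example a completed DMA
 * receive), stale lines covering the buffer must be discarded so the
 * reads fetch fresh data from RAM. The buffer should be cache-line
 * aligned and a multiple of the line size so the invalidate does not
 * discard unrelated neighbouring data.
 *
 *   dma_wait_rx_done();
 *   arch_dcache_invd_range(rx_buf, sizeof(rx_buf));
 *   process(rx_buf, sizeof(rx_buf));
 */
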
/** Implementation of @ref arch_dcache_invd_all. */
static ALWAYS_INLINE int arch_dcache_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t line;

	for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
		__asm__ volatile("dii %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

/** Implementation of @ref arch_dcache_flush_all. */
static ALWAYS_INLINE int arch_dcache_flush_all(void)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t line;

	for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
		__asm__ volatile("diwb %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

/** Implementation of @ref arch_dcache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_dcache_flush_and_invd_all(void)
{
#if XCHAL_DCACHE_SIZE
	size_t step = XCHAL_DCACHE_LINESIZE;
	size_t line;

	for (line = 0; line < XCHAL_DCACHE_SIZE; line += step) {
		__asm__ volatile("diwbi %0, 0" :: "r"(line));
	}
#endif
	return 0;
}

/** Implementation of @ref arch_dcache_enable. */
static ALWAYS_INLINE void arch_dcache_enable(void)
{
	/* nothing */
}

/** Implementation of @ref arch_dcache_disable. */
static ALWAYS_INLINE void arch_dcache_disable(void)
{
	/* nothing */
}

#endif /* CONFIG_DCACHE */

#if defined(CONFIG_ICACHE) || defined(__DOXYGEN__)

/** Implementation of @ref arch_icache_line_size_get. */
static ALWAYS_INLINE size_t arch_icache_line_size_get(void)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_flush_all. */
static ALWAYS_INLINE int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_invd_all. */
static ALWAYS_INLINE int arch_icache_invd_all(void)
{
#if XCHAL_ICACHE_SIZE
	xthal_icache_all_invalidate();
#endif
	return 0;
}

/** Implementation of @ref arch_icache_flush_and_invd_all. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_flush_range. */
static ALWAYS_INLINE int arch_icache_flush_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_invd_range. */
static ALWAYS_INLINE int arch_icache_invd_range(void *addr, size_t size)
{
#if XCHAL_ICACHE_SIZE
	xthal_icache_region_invalidate(addr, size);
#endif
	return 0;
}

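/*
 * Illustrative usage sketch (hypothetical names): after copying
 * executable code into RAM (for example relocating a routine into
 * faster memory), flush the data cache so the instructions reach RAM,
 * then invalidate the instruction cache over the same range before
 * jumping to the new code.
 *
 *   memcpy(code_dst, code_src, code_size);
 *   arch_dcache_flush_range(code_dst, code_size);
 *   arch_icache_invd_range(code_dst, code_size);
 *   ((void (*)(void))code_dst)();
 */
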
/** Implementation of @ref arch_icache_flush_and_invd_range. */
static ALWAYS_INLINE int arch_icache_flush_and_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}

/** Implementation of @ref arch_icache_enable. */
static ALWAYS_INLINE void arch_icache_enable(void)
{
	/* nothing */
}

/** Implementation of @ref arch_icache_disable. */
static ALWAYS_INLINE void arch_icache_disable(void)
{
	/* nothing */
}

#endif /* CONFIG_ICACHE */

#if defined(CONFIG_CACHE_DOUBLEMAP)
/**
 * @brief Test if a pointer is in the cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both an (incoherent) cached mapping
 * and a coherent "shared" area. This tests whether a particular
 * pointer lies within the cached, incoherent mapping.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is in the cached region.
 * @retval False if pointer is not in the cached region.
 */
static inline bool arch_cache_is_ptr_cached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
}

/**
 * @brief Test if a pointer is in the un-cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both an (incoherent) cached mapping
 * and a coherent "shared" area. This tests whether a particular
 * pointer lies within the un-cached, coherent area.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is not in the cached region.
 * @retval False if pointer is in the cached region.
 */
static inline bool arch_cache_is_ptr_uncached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_UNCACHED_REGION;
}

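/*
 * Illustrative usage sketch: code that shares a data structure between
 * CPUs without explicit cache maintenance can assert that it was handed
 * the coherent (uncached) alias. The msg pointer and the use of
 * __ASSERT() from <zephyr/sys/__assert.h> are only for illustration.
 *
 *   __ASSERT(arch_cache_is_ptr_uncached(msg),
 *            "cross-CPU messages must use the uncached mapping");
 */
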
static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
{
	/* The math here is all compile-time: when the two region
	 * numbers differ in exactly one bit (their XOR is a power of
	 * two), we can convert between them by setting or clearing
	 * just that bit.  Otherwise it needs two operations.
	 */
	uint32_t rxor = (rto ^ rfrom) << 29;

	rto <<= 29;
	if (Z_IS_POW2(rxor)) {
		if ((rxor & rto) == 0) {
			return addr & ~rxor;
		} else {
			return addr | rxor;
		}
	} else {
		return (addr & ~(7U << 29)) | rto;
	}
}

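/*
 * Worked example of the math above, with purely illustrative region
 * numbers (the real values come from Kconfig and differ between SoCs):
 * if the cached region were 5 and the uncached region were 4, then
 * rxor == (5 ^ 4) << 29 == 0x20000000, a power of two, so converting an
 * uncached address such as 0x80001000 to its cached alias reduces to a
 * single OR: 0x80001000 | 0x20000000 == 0xa0001000. If the two region
 * numbers differ in more than one bit, the top three address bits are
 * masked off and replaced instead.
 */
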
/**
 * @brief Return cached pointer to a RAM address
 *
 * The Xtensa coherence architecture maps addressable RAM twice, in
 * two different 512MB regions whose L1 cache settings can be
 * controlled independently.  So for any given pointer, it is possible
 * to convert it to and from a cached version.
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory through the L1 data cache.  Data read
 * through the resulting pointer will reflect locally cached values on
 * the current CPU if they exist, and writes will go first into the
 * cache and be written back later.
 *
 * @see arch_cache_uncached_ptr_get()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object via the L1 dcache
 */
static inline void __sparse_cache *arch_cache_cached_ptr_get(void *ptr)
{
	return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
						CONFIG_XTENSA_CACHED_REGION,
						CONFIG_XTENSA_UNCACHED_REGION);
}

/**
 * @brief Return uncached pointer to a RAM address
 *
 * The Xtensa coherence architecture maps addressable RAM twice, in
 * two different 512MB regions whose L1 cache settings can be
 * controlled independently.  So for any given pointer, it is possible
 * to convert it to and from a cached version.
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory while bypassing the L1 data cache.  Data
 * in the L1 cache will not be inspected nor modified by the access.
 *
 * @see arch_cache_cached_ptr_get()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object bypassing the L1 dcache
 */
static inline void *arch_cache_uncached_ptr_get(void __sparse_cache *ptr)
{
	return (void *)z_xtrpoflip((__sparse_force uint32_t)ptr,
				   CONFIG_XTENSA_UNCACHED_REGION,
				   CONFIG_XTENSA_CACHED_REGION);
}
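
/*
 * Illustrative usage sketch (hypothetical names): data written through
 * the uncached alias is visible to other CPUs without any explicit
 * flush, at the cost of bypassing the local L1 data cache. The
 * flag_storage variable below is only for illustration.
 *
 *   uint32_t *shared_flag = arch_cache_uncached_ptr_get(
 *           (__sparse_force void __sparse_cache *)&flag_storage);
 *   *shared_flag = 1;
 */
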
#else
static inline bool arch_cache_is_ptr_cached(void *ptr)
{
	ARG_UNUSED(ptr);

	return false;
}

static inline bool arch_cache_is_ptr_uncached(void *ptr)
{
	ARG_UNUSED(ptr);

	return false;
}

static inline void *arch_cache_cached_ptr_get(void *ptr)
{
	return ptr;
}

static inline void *arch_cache_uncached_ptr_get(void *ptr)
{
	return ptr;
}
#endif


#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_CACHE_H_ */