/*
 * Copyright (c) 2015 Wind River Systems, Inc.
 * Copyright (c) 2022 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_CACHE_H_
#define ZEPHYR_INCLUDE_CACHE_H_

/**
 * @file
 * @brief Cache API interface
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/debug/sparse.h>

#ifdef __cplusplus
extern "C" {
#endif

#if defined(CONFIG_EXTERNAL_CACHE)
#include <zephyr/drivers/cache.h>

#elif defined(CONFIG_ARCH_CACHE)
#include <zephyr/arch/cache.h>

#elif defined(CONFIG_SOC_CACHE)
#include <soc_cache.h>

#endif

/**
 * @defgroup cache_interface Cache Interface
 * @ingroup os_services
 * @{
 */

/**
 * @brief Enable the d-cache
 *
 * Enable the data cache.
 */
static ALWAYS_INLINE void sys_cache_data_enable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	cache_data_enable();
#endif
}

/**
 * @brief Disable the d-cache
 *
 * Disable the data cache.
 */
static ALWAYS_INLINE void sys_cache_data_disable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	cache_data_disable();
#endif
}

/**
 * @brief Enable the i-cache
 *
 * Enable the instruction cache.
 */
static ALWAYS_INLINE void sys_cache_instr_enable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	cache_instr_enable();
#endif
}

/**
 * @brief Disable the i-cache
 *
 * Disable the instruction cache.
 */
static ALWAYS_INLINE void sys_cache_instr_disable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	cache_instr_disable();
#endif
}
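
/*
 * Usage sketch (illustrative, not part of this header): a hypothetical
 * early-init hook that turns both caches on once RAM is usable. The
 * SYS_INIT() registration assumes the Zephyr 3.x init signature; when
 * CONFIG_CACHE_MANAGEMENT is disabled these calls compile to no-ops,
 * so the hook stays portable.
 *
 *	static int board_cache_init(void)
 *	{
 *		sys_cache_instr_enable();
 *		sys_cache_data_enable();
 *
 *		return 0;
 *	}
 *	SYS_INIT(board_cache_init, PRE_KERNEL_1, 0);
 */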

/**
 * @brief Flush the d-cache
 *
 * Flush the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_data_flush_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush the i-cache
 *
 * Flush the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Invalidate the d-cache
 *
 * Invalidate the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_data_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Invalidate the i-cache
 *
 * Invalidate the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush and Invalidate the d-cache
 *
 * Flush and Invalidate the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_data_flush_and_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_and_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush and Invalidate the i-cache
 *
 * Flush and Invalidate the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_and_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_and_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush an address range in the d-cache
 *
 * Flush the specified address range of the data cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the
 *       portions of the data structures sharing the same line will be
 *       flushed. This is usually not a problem because writing back is
 *       a non-destructive process that could be triggered by hardware
 *       at any time, so having an aligned @p addr or a padded @p size
 *       is not strictly necessary.
 *
 * @param addr Starting address to flush.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
__syscall_always_inline int sys_cache_data_flush_range(void *addr, size_t size);

static ALWAYS_INLINE int z_impl_sys_cache_data_flush_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
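
/*
 * Usage sketch (illustrative; fill_tx_data() and dma_start_transfer()
 * are hypothetical helpers): write a buffer back to RAM before a
 * peripheral reads it via DMA, so the device sees the CPU's latest
 * data rather than stale memory contents.
 *
 *	static uint8_t tx_buf[256];
 *
 *	void start_dma_tx(void)
 *	{
 *		fill_tx_data(tx_buf, sizeof(tx_buf));
 *		(void)sys_cache_data_flush_range(tx_buf, sizeof(tx_buf));
 *		dma_start_transfer(tx_buf, sizeof(tx_buf));
 *	}
 */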

/**
 * @brief Flush an address range in the i-cache
 *
 * Flush the specified address range of the instruction cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the
 *       portions of the data structures sharing the same line will be
 *       flushed. This is usually not a problem because writing back is
 *       a non-destructive process that could be triggered by hardware
 *       at any time, so having an aligned @p addr or a padded @p size
 *       is not strictly necessary.
 *
 * @param addr Starting address to flush.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

/**
 * @brief Invalidate an address range in the d-cache
 *
 * Invalidate the specified address range of the data cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being invalidated, all the
 *       portions of the non-read-only data structures sharing the same
 *       line will be invalidated as well. This is a destructive process
 *       that could lead to data loss and/or corruption. When @p addr is
 *       not aligned to the cache line and/or @p size is not a multiple
 *       of the cache line size the behaviour is undefined.
 *
 * @param addr Starting address to invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
__syscall_always_inline int sys_cache_data_invd_range(void *addr, size_t size);

static ALWAYS_INLINE int z_impl_sys_cache_data_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
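
/*
 * Usage sketch (illustrative; process_rx_data() is a hypothetical
 * consumer): invalidate a buffer after a peripheral fills it via DMA,
 * so the CPU refetches the new contents instead of serving stale cache
 * lines. Because invalidation is destructive, the buffer is aligned to
 * the cache line size and its 256-byte length is assumed to be a
 * multiple of that size, per the note above.
 *
 *	static uint8_t rx_buf[256] __aligned(CONFIG_DCACHE_LINE_SIZE);
 *
 *	void on_dma_rx_done(void)
 *	{
 *		(void)sys_cache_data_invd_range(rx_buf, sizeof(rx_buf));
 *		process_rx_data(rx_buf, sizeof(rx_buf));
 *	}
 */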

/**
 * @brief Invalidate an address range in the i-cache
 *
 * Invalidate the specified address range of the instruction cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being invalidated, all the
 *       portions of the non-read-only data structures sharing the same
 *       line will be invalidated as well. This is a destructive process
 *       that could lead to data loss and/or corruption. When @p addr is
 *       not aligned to the cache line and/or @p size is not a multiple
 *       of the cache line size the behaviour is undefined.
 *
 * @param addr Starting address to invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

/**
 * @brief Flush and Invalidate an address range in the d-cache
 *
 * Flush and Invalidate the specified address range of the data cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the
 *       portions of the data structures sharing the same line will be
 *       flushed before being invalidated. This is usually not a problem
 *       because writing back is a non-destructive process that could be
 *       triggered by hardware at any time, so having an aligned @p addr
 *       or a padded @p size is not strictly necessary.
 *
 * @param addr Starting address to flush and invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
__syscall_always_inline int sys_cache_data_flush_and_invd_range(void *addr, size_t size);

static ALWAYS_INLINE int z_impl_sys_cache_data_flush_and_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_and_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
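
/*
 * Usage sketch (illustrative; shmem, SHMEM_SIZE and signal_peer_ready()
 * are hypothetical): hand a scratch region over to another bus master
 * in one step. Dirty lines are written back so the peer sees them, then
 * dropped so the next CPU read refetches whatever the peer wrote.
 *
 *	(void)sys_cache_data_flush_and_invd_range(shmem, SHMEM_SIZE);
 *	signal_peer_ready();
 */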

/**
 * @brief Flush and Invalidate an address range in the i-cache
 *
 * Flush and Invalidate the specified address range of the instruction cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the
 *       portions of the data structures sharing the same line will be
 *       flushed before being invalidated. This is usually not a problem
 *       because writing back is a non-destructive process that could be
 *       triggered by hardware at any time, so having an aligned @p addr
 *       or a padded @p size is not strictly necessary.
 *
 * @param addr Starting address to flush and invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_and_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_and_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

/**
 * @brief Get the d-cache line size.
 *
 * This API returns the data cache line size.
 *
 * The cache line size is determined (in order of priority):
 *
 * - At run-time when @kconfig{CONFIG_DCACHE_LINE_SIZE_DETECT} is set.
 * - At compile time using the value set in @kconfig{CONFIG_DCACHE_LINE_SIZE}.
 * - 0 otherwise.
 *
 * @retval size Size of the d-cache line.
 * @retval 0 If the d-cache is not enabled.
 */
static ALWAYS_INLINE size_t sys_cache_data_line_size_get(void)
{
#ifdef CONFIG_DCACHE_LINE_SIZE_DETECT
	return cache_data_line_size_get();
#elif defined(CONFIG_DCACHE_LINE_SIZE)
	return CONFIG_DCACHE_LINE_SIZE;
#else
	return 0;
#endif
}
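
/*
 * Usage sketch (illustrative; struct desc is a hypothetical type): pad
 * per-object strides at run time so two descriptors never share a
 * cache line. The 64-byte fallback for the "line size unknown" case is
 * an assumption of this example, not a Zephyr default.
 *
 *	size_t line = sys_cache_data_line_size_get();
 *	size_t stride;
 *
 *	if (line == 0U) {
 *		line = 64U;
 *	}
 *	stride = ROUND_UP(sizeof(struct desc), line);
 */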

/**
 * @brief Get the i-cache line size.
 *
 * This API returns the instruction cache line size.
 *
 * The cache line size is determined (in order of priority):
 *
 * - At run-time when @kconfig{CONFIG_ICACHE_LINE_SIZE_DETECT} is set.
 * - At compile time using the value set in @kconfig{CONFIG_ICACHE_LINE_SIZE}.
 * - 0 otherwise.
 *
 * @retval size Size of the i-cache line.
 * @retval 0 If the i-cache is not enabled.
 */
static ALWAYS_INLINE size_t sys_cache_instr_line_size_get(void)
{
#ifdef CONFIG_ICACHE_LINE_SIZE_DETECT
	return cache_instr_line_size_get();
#elif defined(CONFIG_ICACHE_LINE_SIZE)
	return CONFIG_ICACHE_LINE_SIZE;
#else
	return 0;
#endif
}

/**
 * @brief Test if a pointer is in a cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the cached, coherent area.
 *
 * @param ptr Pointer
 *
 * @retval True if the pointer is in a cached region.
 * @retval False if the pointer is not in a cached region.
 */
static ALWAYS_INLINE bool sys_cache_is_ptr_cached(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS)
	return cache_is_ptr_cached(ptr);
#else
	ARG_UNUSED(ptr);

	return false;
#endif
}

/**
 * @brief Test if a pointer is in an un-cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the un-cached, incoherent area.
 *
 * @param ptr Pointer
 *
 * @retval True if the pointer is not in a cached region.
 * @retval False if the pointer is in a cached region.
 */
static ALWAYS_INLINE bool sys_cache_is_ptr_uncached(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS)
	return cache_is_ptr_uncached(ptr);
#else
	ARG_UNUSED(ptr);

	return false;
#endif
}

/**
 * @brief Return cached pointer to a RAM address
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory through the L1 data cache.  Data read
 * through the resulting pointer will reflect locally cached values on
 * the current CPU if they exist, and writes will go first into the
 * cache and be written back later.
 *
 * @note This API returns the same pointer if
 * @kconfig{CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS} is not enabled.
 *
 * @see arch_uncached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object via the L1 dcache
 */
static ALWAYS_INLINE void __sparse_cache *sys_cache_cached_ptr_get(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS)
	return cache_cached_ptr(ptr);
#else
	return (__sparse_force void __sparse_cache *)ptr;
#endif
}

/**
 * @brief Return uncached pointer to a RAM address
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory while bypassing the L1 data cache.  Data
 * in the L1 cache will not be inspected nor modified by the access.
 *
 * @note This API returns the same pointer if
 * @kconfig{CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS} is not enabled.
 *
 * @see arch_cached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object bypassing the L1 dcache
 */
static ALWAYS_INLINE void *sys_cache_uncached_ptr_get(void __sparse_cache *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS)
	return cache_uncached_ptr(ptr);
#else
	return (__sparse_force void *)ptr;
#endif
}
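
/*
 * Usage sketch (illustrative; shared_word is a hypothetical variable
 * visible to another agent): on SoCs that mirror the same RAM through
 * cached and uncached windows, publish a value through the uncached
 * alias so no explicit flush is needed for the peer to see it.
 *
 *	static uint32_t shared_word;
 *
 *	void publish(uint32_t v)
 *	{
 *		uint32_t __sparse_cache *c = sys_cache_cached_ptr_get(&shared_word);
 *		uint32_t *u = sys_cache_uncached_ptr_get(c);
 *
 *		*u = v;
 *	}
 */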

#ifdef CONFIG_LIBMETAL
static ALWAYS_INLINE void sys_cache_flush(void *addr, size_t size)
{
	sys_cache_data_flush_range(addr, size);
}
#endif

#if defined(CONFIG_CACHE_CAN_SAY_MEM_COHERENCE) || defined(__DOXYGEN__)
/**
 * @brief Detect memory coherence type
 *
 * This function returns true if the byte pointed to lies within
 * "coherence regions" (typically implemented with uncached memory) and
 * can safely be used in multiprocessor code without explicit flush or
 * invalidate operations.
 *
 * @note The result is for only the single byte at the specified
 * address; this API is not required to check region boundaries or to
 * expect aligned pointers.  The expectation is that the code above
 * will have queried the appropriate address(es).
 *
 * @param ptr Pointer to be checked.
 *
 * @return True if the pointer is in a coherence region, false otherwise.
 */
static ALWAYS_INLINE bool sys_cache_is_mem_coherent(void *ptr)
{
	return cache_is_mem_coherent(ptr);
}
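
/*
 * Usage sketch (illustrative; msg is a hypothetical IPC payload
 * pointer): assert in debug builds that a buffer handed to another CPU
 * really lives in a coherence region, catching misplaced allocations
 * early.
 *
 *	__ASSERT(sys_cache_is_mem_coherent(msg),
 *		 "IPC payload must be in coherent memory");
 */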
#endif /* CONFIG_CACHE_CAN_SAY_MEM_COHERENCE */

#include <zephyr/syscalls/cache.h>

#ifdef __cplusplus
}
#endif

/**
 * @}
 */

#endif /* ZEPHYR_INCLUDE_CACHE_H_ */