/*
 * Copyright (c) 2015 Wind River Systems, Inc.
 * Copyright (c) 2022 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_CACHE_H_
#define ZEPHYR_INCLUDE_CACHE_H_

/**
 * @file
 * @brief cache API interface
 */

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/debug/sparse.h>

#ifdef __cplusplus
extern "C" {
#endif

#if defined(CONFIG_EXTERNAL_CACHE)
#include <zephyr/drivers/cache.h>

#elif defined(CONFIG_ARCH_CACHE)
#include <zephyr/arch/cache.h>

#endif

/**
 * @defgroup cache_interface Cache Interface
 * @ingroup os_services
 * @{
 */

/**
 * @cond INTERNAL_HIDDEN
 *
 */

#define _CPU DT_PATH(cpus, cpu_0)

/** @endcond */

/**
 * @brief Enable the d-cache
 *
 * Enable the data cache.
 */
static ALWAYS_INLINE void sys_cache_data_enable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	cache_data_enable();
#endif
}

/**
 * @brief Disable the d-cache
 *
 * Disable the data cache.
 */
static ALWAYS_INLINE void sys_cache_data_disable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	cache_data_disable();
#endif
}

/**
 * @brief Enable the i-cache
 *
 * Enable the instruction cache.
 */
static ALWAYS_INLINE void sys_cache_instr_enable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	cache_instr_enable();
#endif
}

/**
 * @brief Disable the i-cache
 *
 * Disable the instruction cache.
 */
static ALWAYS_INLINE void sys_cache_instr_disable(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	cache_instr_disable();
#endif
}
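
/*
 * Usage sketch (illustrative, not part of the API): on platforms where the
 * boot firmware leaves the caches off, early platform code might enable
 * them before the application runs. `board_early_init()` is a hypothetical
 * hook; both calls compile to no-ops unless CONFIG_CACHE_MANAGEMENT and the
 * relevant cache Kconfig options are enabled.
 *
 *	void board_early_init(void)
 *	{
 *		sys_cache_instr_enable();
 *		sys_cache_data_enable();
 *	}
 */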

/**
 * @brief Flush the d-cache
 *
 * Flush the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_data_flush_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush the i-cache
 *
 * Flush the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Invalidate the d-cache
 *
 * Invalidate the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_data_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Invalidate the i-cache
 *
 * Invalidate the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush and Invalidate the d-cache
 *
 * Flush and Invalidate the whole data cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_data_flush_and_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_and_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush and Invalidate the i-cache
 *
 * Flush and Invalidate the whole instruction cache.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_and_invd_all(void)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_and_invd_all();
#endif
	return -ENOTSUP;
}

/**
 * @brief Flush an address range in the d-cache
 *
 * Flush the specified address range of the data cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the portions
 *       of the data structures sharing the same line will be flushed. This
 *       is usually not a problem because writing back is a non-destructive
 *       process that could be triggered by hardware at any time, so having
 *       an aligned @p addr or a padded @p size is not strictly necessary.
 *
 * @param addr Starting address to flush.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
__syscall_always_inline int sys_cache_data_flush_range(void *addr, size_t size);

static ALWAYS_INLINE int z_impl_sys_cache_data_flush_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
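
/*
 * Usage sketch (illustrative): write a buffer back to RAM before handing it
 * to a DMA engine or another bus master that reads memory directly, so the
 * device observes the CPU's latest stores. `tx_buf` is a hypothetical
 * transmit buffer; -ENOTSUP simply means no cache maintenance is needed.
 *
 *	static uint8_t tx_buf[256];
 *
 *	int ret = sys_cache_data_flush_range(tx_buf, sizeof(tx_buf));
 *
 *	if (ret != 0 && ret != -ENOTSUP) {
 *		return ret;
 *	}
 *	// the device may now read tx_buf from RAM
 */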

/**
 * @brief Flush an address range in the i-cache
 *
 * Flush the specified address range of the instruction cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the portions
 *       of the data structures sharing the same line will be flushed. This
 *       is usually not a problem because writing back is a non-destructive
 *       process that could be triggered by hardware at any time, so having
 *       an aligned @p addr or a padded @p size is not strictly necessary.
 *
 * @param addr Starting address to flush.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

/**
 * @brief Invalidate an address range in the d-cache
 *
 * Invalidate the specified address range of the data cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being invalidated, all the
 *       portions of the non-read-only data structures sharing the same line
 *       will be invalidated as well. This is a destructive process that
 *       could lead to data loss and/or corruption. When @p addr is not
 *       aligned to the cache line and/or @p size is not a multiple of the
 *       cache line size the behaviour is undefined.
 *
 * @param addr Starting address to invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
__syscall_always_inline int sys_cache_data_invd_range(void *addr, size_t size);

static ALWAYS_INLINE int z_impl_sys_cache_data_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}
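
/*
 * Usage sketch (illustrative): discard stale cache lines before the CPU
 * reads a buffer that a DMA engine has just written. Because invalidation
 * is destructive, the buffer is aligned and padded to whole cache lines;
 * `CACHE_LINE` and `rx_buf` are hypothetical names.
 *
 *	#define CACHE_LINE 64
 *
 *	static uint8_t rx_buf[256] __aligned(CACHE_LINE);
 *
 *	// after the DMA transfer into rx_buf has completed:
 *	int ret = sys_cache_data_invd_range(rx_buf, sizeof(rx_buf));
 *
 *	if (ret == 0 || ret == -ENOTSUP) {
 *		// CPU reads of rx_buf now observe the DMA-written data
 *	}
 */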

/**
 * @brief Invalidate an address range in the i-cache
 *
 * Invalidate the specified address range of the instruction cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being invalidated, all the
 *       portions of the non-read-only data structures sharing the same line
 *       will be invalidated as well. This is a destructive process that
 *       could lead to data loss and/or corruption. When @p addr is not
 *       aligned to the cache line and/or @p size is not a multiple of the
 *       cache line size the behaviour is undefined.
 *
 * @param addr Starting address to invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

/**
 * @brief Flush and Invalidate an address range in the d-cache
 *
 * Flush and Invalidate the specified address range of the data cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the portions
 *       of the data structures sharing the same line will be flushed before
 *       being invalidated. This is usually not a problem because writing
 *       back is a non-destructive process that could be triggered by
 *       hardware at any time, so having an aligned @p addr or a padded
 *       @p size is not strictly necessary.
 *
 * @param addr Starting address to flush and invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
__syscall_always_inline int sys_cache_data_flush_and_invd_range(void *addr, size_t size);

static ALWAYS_INLINE int z_impl_sys_cache_data_flush_and_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	return cache_data_flush_and_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

/**
 * @brief Flush and Invalidate an address range in the i-cache
 *
 * Flush and Invalidate the specified address range of the instruction cache.
 *
 * @note The cache operations act on cache lines. When multiple data
 *       structures share the same cache line being flushed, all the portions
 *       of the data structures sharing the same line will be flushed before
 *       being invalidated. This is usually not a problem because writing
 *       back is a non-destructive process that could be triggered by
 *       hardware at any time, so having an aligned @p addr or a padded
 *       @p size is not strictly necessary.
 *
 * @param addr Starting address to flush and invalidate.
 * @param size Range size.
 *
 * @retval 0 If succeeded.
 * @retval -ENOTSUP If not supported.
 * @retval -errno Negative errno for other failures.
 */
static ALWAYS_INLINE int sys_cache_instr_flush_and_invd_range(void *addr, size_t size)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_ICACHE)
	return cache_instr_flush_and_invd_range(addr, size);
#endif
	ARG_UNUSED(addr);
	ARG_UNUSED(size);

	return -ENOTSUP;
}

/**
 *
 * @brief Get the d-cache line size.
 *
 * The API is provided to get the data cache line size.
 *
 * The cache line size is determined (in order of priority):
 *
 * - At run-time when @kconfig{CONFIG_DCACHE_LINE_SIZE_DETECT} is set.
 * - At compile time using the value set in @kconfig{CONFIG_DCACHE_LINE_SIZE}.
 * - At compile time using the `d-cache-line-size` CPU0 property of the DT.
 * - 0 otherwise
 *
 * @retval size Size of the d-cache line.
 * @retval 0 If the d-cache is not enabled.
 */
static ALWAYS_INLINE size_t sys_cache_data_line_size_get(void)
{
#ifdef CONFIG_DCACHE_LINE_SIZE_DETECT
	return cache_data_line_size_get();
#elif (CONFIG_DCACHE_LINE_SIZE != 0)
	return CONFIG_DCACHE_LINE_SIZE;
#else
	return DT_PROP_OR(_CPU, d_cache_line_size, 0);
#endif
}
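
/*
 * Usage sketch (illustrative): pad a length up to a whole number of cache
 * lines at run time. A zero return means no line size is known, so a
 * fallback is used; the fallback value and `len` are hypothetical.
 *
 *	size_t line = sys_cache_data_line_size_get();
 *
 *	if (line == 0U) {
 *		line = 32U; // hypothetical fallback, no d-cache info available
 *	}
 *
 *	size_t padded = ROUND_UP(len, line);
 */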

/**
 *
 * @brief Get the i-cache line size.
 *
 * The API is provided to get the instruction cache line size.
 *
 * The cache line size is determined (in order of priority):
 *
 * - At run-time when @kconfig{CONFIG_ICACHE_LINE_SIZE_DETECT} is set.
 * - At compile time using the value set in @kconfig{CONFIG_ICACHE_LINE_SIZE}.
 * - At compile time using the `i-cache-line-size` CPU0 property of the DT.
 * - 0 otherwise
 *
 * @retval size Size of the i-cache line.
 * @retval 0 If the i-cache is not enabled.
 */
static ALWAYS_INLINE size_t sys_cache_instr_line_size_get(void)
{
#ifdef CONFIG_ICACHE_LINE_SIZE_DETECT
	return cache_instr_line_size_get();
#elif (CONFIG_ICACHE_LINE_SIZE != 0)
	return CONFIG_ICACHE_LINE_SIZE;
#else
	return DT_PROP_OR(_CPU, i_cache_line_size, 0);
#endif
}

/**
 * @brief Test if a pointer is in cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the cached, coherent area.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is in cached region.
 * @retval False if pointer is not in cached region.
 */
static ALWAYS_INLINE bool sys_cache_is_ptr_cached(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_is_ptr_cached(ptr);
#else
	ARG_UNUSED(ptr);

	return false;
#endif
}

/**
 * @brief Test if a pointer is in un-cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the un-cached, incoherent area.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is not in cached region.
 * @retval False if pointer is in cached region.
 */
static ALWAYS_INLINE bool sys_cache_is_ptr_uncached(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_is_ptr_uncached(ptr);
#else
	ARG_UNUSED(ptr);

	return false;
#endif
}

/**
 * @brief Return cached pointer to a RAM address
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory through the L1 data cache. Data read
 * through the resulting pointer will reflect locally cached values on
 * the current CPU if they exist, and writes will go first into the
 * cache and be written back later.
 *
 * @note This API returns the same pointer if CONFIG_CACHE_DOUBLEMAP is not
 * enabled.
 *
 * @see arch_uncached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object via the L1 dcache
 */
static ALWAYS_INLINE void __sparse_cache *sys_cache_cached_ptr_get(void *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_cached_ptr(ptr);
#else
	return (__sparse_force void __sparse_cache *)ptr;
#endif
}

/**
 * @brief Return uncached pointer to a RAM address
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory while bypassing the L1 data cache. Data
 * in the L1 cache will not be inspected nor modified by the access.
 *
 * @note This API returns the same pointer if CONFIG_CACHE_DOUBLEMAP is not
 * enabled.
 *
 * @see arch_cached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object bypassing the L1 dcache
 */
static ALWAYS_INLINE void *sys_cache_uncached_ptr_get(void __sparse_cache *ptr)
{
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_CACHE_DOUBLEMAP)
	return cache_uncached_ptr(ptr);
#else
	return (__sparse_force void *)ptr;
#endif
}
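
/*
 * Usage sketch (illustrative): with CONFIG_CACHE_DOUBLEMAP enabled, the same
 * RAM is reachable through a cached and an uncached alias. A store through
 * the uncached alias bypasses the L1 d-cache, so another agent polling that
 * memory can see it without an explicit flush. `buf` is a hypothetical
 * buffer located in double-mapped RAM.
 *
 *	void __sparse_cache *cp = sys_cache_cached_ptr_get(buf);
 *	uint8_t *up = sys_cache_uncached_ptr_get(cp);
 *
 *	up[0] = 0xa5; // visible to other agents without a cache flush
 */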

#ifdef CONFIG_LIBMETAL
static ALWAYS_INLINE void sys_cache_flush(void *addr, size_t size)
{
	sys_cache_data_flush_range(addr, size);
}
#endif

#include <syscalls/cache.h>
#ifdef __cplusplus
}
#endif

/**
 * @}
 */

#endif /* ZEPHYR_INCLUDE_CACHE_H_ */