/*
 * Copyright 2021 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_SOC_INTEL_ADSP_CACHE_H_
#define ZEPHYR_SOC_INTEL_ADSP_CACHE_H_

#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#include <zephyr/debug/sparse.h>
#include <xtensa/hal.h>

/* We can reuse most of the Xtensa cache functions. */
#include <zephyr/arch/xtensa/cache.h>

#ifdef __cplusplus
extern "C" {
#endif

#if defined(CONFIG_DCACHE)

#define cache_data_enable arch_dcache_enable
#define cache_data_disable arch_dcache_disable
#define cache_data_flush_all arch_dcache_flush_all
#define cache_data_invd_all arch_dcache_invd_all
#define cache_data_flush_and_invd_all arch_dcache_flush_and_invd_all
#define cache_data_flush_range(addr, size) arch_dcache_flush_range(addr, size)
#define cache_data_invd_range(addr, size) arch_dcache_invd_range(addr, size)
#define cache_data_flush_and_invd_range(addr, size) arch_dcache_flush_and_invd_range(addr, size)
#define cache_data_line_size_get arch_dcache_line_size_get

#endif /* CONFIG_DCACHE */

#if defined(CONFIG_ICACHE)

#define cache_instr_enable arch_icache_enable
#define cache_instr_disable arch_icache_disable
#define cache_instr_flush_all arch_icache_flush_all
#define cache_instr_invd_all arch_icache_invd_all
#define cache_instr_flush_and_invd_all arch_icache_flush_and_invd_all
#define cache_instr_flush_range(addr, size) arch_icache_flush_range(addr, size)
#define cache_instr_invd_range(addr, size) arch_icache_invd_range(addr, size)
#define cache_instr_flush_and_invd_range(addr, size) arch_icache_flush_and_invd_range(addr, size)
#define cache_instr_line_size_get arch_icache_line_size_get

#endif /* CONFIG_ICACHE */

#if defined(CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS)
/**
 * @brief Test if a pointer is in the cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the cached (incoherent) area.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is in the cached region.
 * @retval False if pointer is not in the cached region.
 */
static ALWAYS_INLINE bool soc_cache_is_ptr_cached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_INTEL_ADSP_CACHED_REGION;
}

/**
 * @brief Test if a pointer is in the un-cached region.
 *
 * Some hardware may map the same physical memory twice
 * so that it can be seen in both (incoherent) cached mappings
 * and a coherent "shared" area. This tests if a particular
 * pointer is within the un-cached (coherent) area.
 *
 * @param ptr Pointer
 *
 * @retval True if pointer is in the un-cached region.
 * @retval False if pointer is not in the un-cached region.
 */
static ALWAYS_INLINE bool soc_cache_is_ptr_uncached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_INTEL_ADSP_UNCACHED_REGION;
}
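
/*
 * Illustrative sketch only; the region numbers below are hypothetical
 * (the real values come from CONFIG_INTEL_ADSP_CACHED_REGION and
 * CONFIG_INTEL_ADSP_UNCACHED_REGION).  With a cached region of 5 and an
 * un-cached region of 4, the top three address bits select the mapping:
 *
 *	soc_cache_is_ptr_cached((void *)0xbe001000);   // 0xbe001000 >> 29 == 5
 *	soc_cache_is_ptr_uncached((void *)0x9e001000); // 0x9e001000 >> 29 == 4
 */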

static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t rfrom)
{
	/* The math here is all compile-time: when the two regions
	 * differ by a power of two, we can convert between them by
	 * setting or clearing just one bit.  Otherwise it needs two
	 * operations.
	 */
	uint32_t rxor = (rto ^ rfrom) << 29;

	rto <<= 29;
	if (Z_IS_POW2(rxor)) {
		if ((rxor & rto) == 0) {
			return addr & ~rxor;
		} else {
			return addr | rxor;
		}
	} else {
		return (addr & ~(7U << 29)) | rto;
	}
}
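
/*
 * Worked example (illustrative region numbers only): converting from
 * region 4 to region 5 gives rxor = (5 ^ 4) << 29 = 0x20000000, which is
 * a power of two, so the translation is a single bit operation:
 *
 *	z_xtrpoflip(0x9e001000, 5, 4) == (0x9e001000 | 0x20000000)
 *	                              == 0xbe001000
 *
 * When the two region numbers differ in more than one bit (e.g. 5 and 2),
 * the fallback path clears all three region bits (31:29) and ORs in the
 * target region instead.
 */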

/**
 * @brief Return cached pointer to a RAM address
 *
 * The Xtensa coherence architecture maps addressable RAM twice, in
 * two different 512MB regions whose L1 cache settings can be
 * controlled independently.  So for any given pointer, it is possible
 * to convert it to and from a cached version.
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory through the L1 data cache.  Data read
 * through the resulting pointer will reflect locally cached values on
 * the current CPU if they exist, and writes will go first into the
 * cache and be written back later.
 *
 * @see soc_cache_uncached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object via the L1 dcache
 */
static ALWAYS_INLINE void __sparse_cache *soc_cache_cached_ptr(void *ptr)
{
	return (__sparse_force void __sparse_cache *)z_xtrpoflip((uint32_t) ptr,
						CONFIG_INTEL_ADSP_CACHED_REGION,
						CONFIG_INTEL_ADSP_UNCACHED_REGION);
}

/**
 * @brief Return uncached pointer to a RAM address
 *
 * The Xtensa coherence architecture maps addressable RAM twice, in
 * two different 512MB regions whose L1 cache settings can be
 * controlled independently.  So for any given pointer, it is possible
 * to convert it to and from a cached version.
 *
 * This function takes a pointer to any addressable object (either in
 * cacheable memory or not) and returns a pointer that can be used to
 * refer to the same memory while bypassing the L1 data cache.  Data
 * in the L1 cache will not be inspected nor modified by the access.
 *
 * @see soc_cache_cached_ptr()
 *
 * @param ptr A pointer to a valid C object
 * @return A pointer to the same object bypassing the L1 dcache
 */
static ALWAYS_INLINE void *soc_cache_uncached_ptr(void __sparse_cache *ptr)
{
	return (void *)z_xtrpoflip((__sparse_force uint32_t)ptr,
				   CONFIG_INTEL_ADSP_UNCACHED_REGION,
				   CONFIG_INTEL_ADSP_CACHED_REGION);
}
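
/*
 * Usage sketch (hypothetical buffer, assuming CONFIG_DCACHE is enabled;
 * this is illustration, not part of the API): a writer can fill a buffer
 * through its cached alias, flush it, and then hand the un-cached alias
 * to another agent for a coherent view:
 *
 *	extern uint8_t shared_buf[64];
 *
 *	uint8_t __sparse_cache *cp = soc_cache_cached_ptr(shared_buf);
 *
 *	cp[0] = 0x5a;
 *	cache_data_flush_range((__sparse_force void *)cp, sizeof(shared_buf));
 *
 *	uint8_t *up = soc_cache_uncached_ptr(cp);
 */
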
#else
static ALWAYS_INLINE bool soc_cache_is_ptr_cached(void *ptr)
{
	ARG_UNUSED(ptr);

	return false;
}

static ALWAYS_INLINE bool soc_cache_is_ptr_uncached(void *ptr)
{
	ARG_UNUSED(ptr);

	return false;
}

static ALWAYS_INLINE void *soc_cache_cached_ptr(void *ptr)
{
	return ptr;
}

static ALWAYS_INLINE void *soc_cache_uncached_ptr(void *ptr)
{
	return ptr;
}
#endif /* CONFIG_CACHE_HAS_MIRRORED_MEMORY_REGIONS */

#ifdef cache_is_ptr_cached
#undef cache_is_ptr_cached
#endif
#define cache_is_ptr_cached(ptr) soc_cache_is_ptr_cached(ptr)

#ifdef cache_is_ptr_uncached
#undef cache_is_ptr_uncached
#endif
#define cache_is_ptr_uncached(ptr) soc_cache_is_ptr_uncached(ptr)

#ifdef cache_cached_ptr
#undef cache_cached_ptr
#endif
#define cache_cached_ptr(ptr) soc_cache_cached_ptr(ptr)

#ifdef cache_uncached_ptr
#undef cache_uncached_ptr
#endif
#define cache_uncached_ptr(ptr) soc_cache_uncached_ptr(ptr)

static ALWAYS_INLINE void soc_cache_init(void)
{
}

#if defined(CONFIG_CACHE_CAN_SAY_MEM_COHERENCE)
/**
 * @brief Test if memory at a pointer is coherent.
 *
 * Memory is reported coherent only when accessed through the
 * un-cached region, which bypasses the (incoherent) L1 data cache.
 *
 * @param ptr Pointer
 *
 * @retval True if memory at the pointer is coherent.
 * @retval False otherwise.
 */
static ALWAYS_INLINE bool soc_cache_is_mem_coherent(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_INTEL_ADSP_UNCACHED_REGION;
}

#ifdef cache_is_mem_coherent
#undef cache_is_mem_coherent
#endif
#define cache_is_mem_coherent(ptr) soc_cache_is_mem_coherent(ptr)

#endif /* CONFIG_CACHE_CAN_SAY_MEM_COHERENCE */

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* ZEPHYR_SOC_INTEL_ADSP_CACHE_H_ */