1 /*
2 * Copyright (c) 2024 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <zephyr/kernel.h>
7 #include <zephyr/drivers/cache.h>
8 #include <hal/nrf_cache.h>
9 #include <zephyr/logging/log.h>
10
11 LOG_MODULE_REGISTER(cache_nrfx, CONFIG_CACHE_LOG_LEVEL);
12
13 #if !defined(NRF_ICACHE) && defined(NRF_CACHE)
14 #define NRF_ICACHE NRF_CACHE
15 #endif
16
17 #define CACHE_BUSY_RETRY_INTERVAL_US 10
18
19
/*
 * Cache maintenance operations. The hardware (nRF) naming differs from the
 * Zephyr cache API naming; each value's comment gives the mapping.
 */
enum k_nrf_cache_op {
	/*
	 * Sequentially loop through all dirty lines and write those data units to
	 * memory.
	 *
	 * This is FLUSH in Zephyr nomenclature.
	 */
	K_NRF_CACHE_CLEAN,

	/*
	 * Mark all lines as invalid, ignoring any dirty data.
	 *
	 * This is INVALIDATE in Zephyr nomenclature.
	 */
	K_NRF_CACHE_INVD,

	/*
	 * Clean followed by invalidate
	 *
	 * This is FLUSH_AND_INVALIDATE in Zephyr nomenclature.
	 */
	K_NRF_CACHE_FLUSH,
};
43
/*
 * Check whether the cache peripheral is still processing a task.
 * Hardware without a status register is treated as always idle.
 */
static inline bool is_cache_busy(NRF_CACHE_Type *cache)
{
	bool busy;

#if NRF_CACHE_HAS_STATUS
	busy = nrf_cache_busy_check(cache);
#else
	busy = false;
#endif

	return busy;
}
52
/*
 * Block until the cache peripheral reports idle.
 *
 * Poll with a short pause between checks instead of spinning as fast as
 * possible, so the core does not hammer the peripheral bus while waiting.
 * (CACHE_BUSY_RETRY_INTERVAL_US was previously defined but unused.)
 */
static inline void wait_for_cache(NRF_CACHE_Type *cache)
{
	while (is_cache_busy(cache)) {
		k_busy_wait(CACHE_BUSY_RETRY_INTERVAL_US);
	}
}
58
/*
 * Run @p op over the entire cache.
 *
 * @retval 0 on success.
 * @retval -ENOTSUP if @p op cannot be applied to the whole cache, either
 *         because it is K_NRF_CACHE_INVD (see below) or because the hardware
 *         lacks the corresponding task.
 */
static inline int _cache_all(NRF_CACHE_Type *cache, enum k_nrf_cache_op op)
{
	/*
	 * We really do not want to invalidate the whole cache: that would
	 * silently discard any dirty lines.
	 */
	if (op == K_NRF_CACHE_INVD) {
		return -ENOTSUP;
	}

	wait_for_cache(cache);

	switch (op) {

#if NRF_CACHE_HAS_TASK_CLEAN
	case K_NRF_CACHE_CLEAN:
		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANCACHE);
		break;
#endif

#if NRF_CACHE_HAS_TASK_FLUSH
	case K_NRF_CACHE_FLUSH:
		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHCACHE);
		break;
#endif

	default:
		/*
		 * Requested task is not supported by this hardware. Callers
		 * normally #if-guard these paths, but do not report success
		 * for work that was never triggered.
		 */
		return -ENOTSUP;
	}

	wait_for_cache(cache);

	return 0;
}
96
/*
 * Perform @p op on the single cache line containing @p line_addr.
 *
 * The LINEADDR write is verified by reading the register back, and the whole
 * wait/set/trigger sequence is retried until the requested address sticks —
 * presumably the register write can be ignored while the peripheral is busy;
 * NOTE(review): confirm against the product specification.
 */
static inline void _cache_line(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, uintptr_t line_addr)
{
	do {
		wait_for_cache(cache);

		nrf_cache_lineaddr_set(cache, line_addr);

		switch (op) {

#if NRF_CACHE_HAS_TASK_CLEAN
		case K_NRF_CACHE_CLEAN:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANLINE);
			break;
#endif

		case K_NRF_CACHE_INVD:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATELINE);
			break;

#if NRF_CACHE_HAS_TASK_FLUSH
		case K_NRF_CACHE_FLUSH:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHLINE);
			break;
#endif

		default:
			break;
		}
		/* Retry if LINEADDR did not latch the requested address. */
	} while (nrf_cache_lineaddr_get(cache) != line_addr);
}
127
/*
 * Perform @p op line-by-line over the address range [addr, addr + size).
 *
 * NOTE(review): CONFIG_DCACHE_LINE_SIZE is used for alignment even when
 * @p cache is the instruction cache — presumably both caches share the same
 * line size; confirm for new SoCs.
 */
static inline int _cache_range(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr,
			       size_t size)
{
	uintptr_t line_addr = (uintptr_t)addr;
	uintptr_t end_addr;

	/* Some SoCs have a bug that requires setting the 28th bit of the
	 * address on TrustZone secure builds.
	 */
	if (IS_ENABLED(CONFIG_CACHE_NRF_PATCH_LINEADDR) &&
	    !IS_ENABLED(CONFIG_TRUSTED_EXECUTION_NONSECURE)) {
		line_addr |= BIT(28);
	}

	end_addr = line_addr + size;

	/*
	 * Align address to line size (assumed to be a power of two).
	 */
	line_addr &= ~(CONFIG_DCACHE_LINE_SIZE - 1);

	do {
		_cache_line(cache, op, line_addr);
		line_addr += CONFIG_DCACHE_LINE_SIZE;
	} while (line_addr < end_addr);

	/* Make sure the last per-line task has completed before returning. */
	wait_for_cache(cache);

	return 0;
}
158
/*
 * Validate arguments and dispatch @p op to the whole-cache or per-range
 * implementation.
 *
 * @retval -EAGAIN if the cache is not enabled.
 * @retval -EINVAL for a range operation with a NULL address or zero size.
 * @retval Otherwise, the result of _cache_all() / _cache_range().
 */
static inline int _cache_checks(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr,
				size_t size, bool is_range)
{
	/* Maintenance operations are only meaningful on an enabled cache. */
	if ((cache->ENABLE & CACHE_ENABLE_ENABLE_Enabled) == 0) {
		return -EAGAIN;
	}

	if (is_range) {
		if ((addr == NULL) || (size == 0)) {
			return -EINVAL;
		}
		return _cache_range(cache, op, addr, size);
	}

	return _cache_all(cache, op);
}
178
179 #if defined(NRF_DCACHE) && NRF_CACHE_HAS_TASKS
180
/* Enable the data cache. */
void cache_data_enable(void)
{
	nrf_cache_enable(NRF_DCACHE);
}
185
/*
 * Write back every dirty data cache line (Zephyr "flush" == hardware "clean").
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled,
 *         -ENOTSUP if the hardware lacks the CLEANCACHE task.
 */
int cache_data_flush_all(void)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}
194
/*
 * Disable the data cache.
 *
 * If the cache is currently enabled, dirty lines are written back first so
 * pending data is not lost when the cache is turned off.
 */
void cache_data_disable(void)
{
	if (nrf_cache_enable_check(NRF_DCACHE)) {
		(void)cache_data_flush_all();
	}
	nrf_cache_disable(NRF_DCACHE);
}
202
/*
 * Invalidate the whole data cache. _cache_all() rejects whole-cache
 * invalidation, so (after the enable check) this reports -ENOTSUP.
 */
int cache_data_invd_all(void)
{
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_INVD, NULL, 0, false);
}
207
/*
 * Write back and invalidate the whole data cache (hardware FLUSHCACHE task).
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled,
 *         -ENOTSUP if the hardware lacks the FLUSHCACHE task.
 */
int cache_data_flush_and_invd_all(void)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}
216
/*
 * Write back the data cache lines covering [addr, addr + size).
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled, -EINVAL for a NULL
 *         address or zero size, -ENOTSUP without the CLEANLINE task.
 */
int cache_data_flush_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
225
/*
 * Invalidate (discard, without write-back) the data cache lines covering
 * [addr, addr + size). Any dirty data in those lines is lost.
 */
int cache_data_invd_range(void *addr, size_t size)
{
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_INVD, addr, size, true);
}
230
/*
 * Write back and invalidate the data cache lines covering [addr, addr + size).
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled, -EINVAL for a NULL
 *         address or zero size, -ENOTSUP without the FLUSHLINE task.
 */
int cache_data_flush_and_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
239
240 #else
241
/* No data cache (or no task interface) on this SoC: nothing to enable. */
void cache_data_enable(void)
{
	/* Nothing */
}
246
/* No data cache (or no task interface) on this SoC: nothing to disable. */
void cache_data_disable(void)
{
	/* Nothing */
}
251
/* No data cache on this SoC: flush is unsupported. */
int cache_data_flush_all(void)
{
	return -ENOTSUP;
}
256
/* No data cache on this SoC: invalidate is unsupported. */
int cache_data_invd_all(void)
{
	return -ENOTSUP;
}
261
/* No data cache on this SoC: flush-and-invalidate is unsupported. */
int cache_data_flush_and_invd_all(void)
{
	return -ENOTSUP;
}
266
/* No data cache on this SoC: range flush is unsupported. */
int cache_data_flush_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
271
/* No data cache on this SoC: range invalidate is unsupported. */
int cache_data_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
276
/* No data cache on this SoC: range flush-and-invalidate is unsupported. */
int cache_data_flush_and_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
281
282 #endif /* NRF_DCACHE */
283
284 #if defined(NRF_ICACHE) && NRF_CACHE_HAS_TASKS
285
/* Enable the instruction cache. */
void cache_instr_enable(void)
{
	nrf_cache_enable(NRF_ICACHE);
}
290
/*
 * Disable the instruction cache.
 *
 * Unlike cache_data_disable(), no flush is performed first.
 */
void cache_instr_disable(void)
{
	nrf_cache_disable(NRF_ICACHE);
}
295
/*
 * Write back the whole instruction cache (Zephyr "flush" == hardware "clean").
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled,
 *         -ENOTSUP if the hardware lacks the CLEANCACHE task.
 */
int cache_instr_flush_all(void)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}
304
/*
 * Invalidate the whole instruction cache. _cache_all() rejects whole-cache
 * invalidation, so (after the enable check) this reports -ENOTSUP.
 */
int cache_instr_invd_all(void)
{
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, NULL, 0, false);
}
309
/*
 * Write back and invalidate the whole instruction cache (FLUSHCACHE task).
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled,
 *         -ENOTSUP if the hardware lacks the FLUSHCACHE task.
 */
int cache_instr_flush_and_invd_all(void)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}
318
/*
 * Write back the instruction cache lines covering [addr, addr + size).
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled, -EINVAL for a NULL
 *         address or zero size, -ENOTSUP without the CLEANLINE task.
 */
int cache_instr_flush_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
327
/* Invalidate the instruction cache lines covering [addr, addr + size). */
int cache_instr_invd_range(void *addr, size_t size)
{
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, addr, size, true);
}
332
/*
 * Write back and invalidate the instruction cache lines covering
 * [addr, addr + size).
 *
 * @retval 0 on success, -EAGAIN if the cache is disabled, -EINVAL for a NULL
 *         address or zero size, -ENOTSUP without the FLUSHLINE task.
 */
int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
341
342 #else
343
/* No instruction cache (or no task interface) on this SoC: nothing to enable. */
void cache_instr_enable(void)
{
	/* Nothing */
}
348
/* No instruction cache (or no task interface) on this SoC: nothing to disable. */
void cache_instr_disable(void)
{
	/* Nothing */
}
353
/* No instruction cache on this SoC: flush is unsupported. */
int cache_instr_flush_all(void)
{
	return -ENOTSUP;
}
358
/* No instruction cache on this SoC: invalidate is unsupported. */
int cache_instr_invd_all(void)
{
	return -ENOTSUP;
}
363
/* No instruction cache on this SoC: flush-and-invalidate is unsupported. */
int cache_instr_flush_and_invd_all(void)
{
	return -ENOTSUP;
}
368
/* No instruction cache on this SoC: range flush is unsupported. */
int cache_instr_flush_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
373
/* No instruction cache on this SoC: range invalidate is unsupported. */
int cache_instr_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
378
/* No instruction cache on this SoC: range flush-and-invalidate is unsupported. */
int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
	return -ENOTSUP;
}
383
384 #endif /* NRF_ICACHE */
385