1 /*
2 * Copyright (c) 2024 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <zephyr/kernel.h>
7 #include <zephyr/drivers/cache.h>
8 #include <zephyr/sys/barrier.h>
9 #include <hal/nrf_cache.h>
10 #include <zephyr/logging/log.h>
11
12 LOG_MODULE_REGISTER(cache_nrfx, CONFIG_CACHE_LOG_LEVEL);
13
14 #if !defined(NRF_ICACHE) && defined(NRF_CACHE)
15 #define NRF_ICACHE NRF_CACHE
16 #endif
17
18 #define CACHE_BUSY_RETRY_INTERVAL_US 10
19
20
/*
 * Cache maintenance operations supported by this driver, mapped onto the
 * nRF CACHE peripheral task names. Note the naming mismatch with Zephyr:
 * the hardware "FLUSH" task corresponds to Zephyr's flush-and-invalidate.
 */
enum k_nrf_cache_op {
	/*
	 * Sequentially loop through all dirty lines and write those data units to
	 * memory.
	 *
	 * This is FLUSH in Zephyr nomenclature.
	 */
	K_NRF_CACHE_CLEAN,

	/*
	 * Mark all lines as invalid, ignoring any dirty data.
	 *
	 * This is INVALIDATE in Zephyr nomenclature.
	 */
	K_NRF_CACHE_INVD,

	/*
	 * Clean followed by invalidate.
	 *
	 * This is FLUSH_AND_INVALIDATE in Zephyr nomenclature.
	 */
	K_NRF_CACHE_FLUSH,
};
44
/*
 * Check whether a cache maintenance task is currently in progress.
 *
 * Peripherals without a status register cannot report the busy state,
 * so the cache is reported as always idle there.
 */
static inline bool is_cache_busy(NRF_CACHE_Type *cache)
{
#if NRF_CACHE_HAS_STATUS
	return nrf_cache_busy_check(cache);
#else
	return false;
#endif
}
53
/*
 * Busy-poll until any ongoing cache maintenance task has completed.
 * Returns immediately on peripherals without a status register, since
 * is_cache_busy() always reports idle there.
 */
static inline void wait_for_cache(NRF_CACHE_Type *cache)
{
	for (;;) {
		if (!is_cache_busy(cache)) {
			break;
		}
	}
}
59
/*
 * Trigger a whole-cache maintenance task for @p op and wait until it
 * completes. Operations not supported by the peripheral (compiled out
 * via the NRF_CACHE_HAS_TASK_* guards) are silently ignored.
 *
 * Always returns 0.
 */
static inline int _cache_all(NRF_CACHE_Type *cache, enum k_nrf_cache_op op)
{
	/* Never start a new task while a previous one may still be running. */
	wait_for_cache(cache);

	/* Ensure outstanding memory accesses complete before the task starts. */
	barrier_dsync_fence_full();

	switch (op) {

#if NRF_CACHE_HAS_TASK_CLEAN
	case K_NRF_CACHE_CLEAN:
		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANCACHE);
		break;
#endif

	case K_NRF_CACHE_INVD:
		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATECACHE);
		break;

#if NRF_CACHE_HAS_TASK_FLUSH
	case K_NRF_CACHE_FLUSH:
		nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHCACHE);
		break;
#endif

	default:
		break;
	}

	/* Block until the triggered task (if any) has finished. */
	wait_for_cache(cache);

	return 0;
}
92
93 #if NRF_CACHE_HAS_LINEADDR
/*
 * Run the per-line maintenance task for @p op on the cache line identified
 * by @p line_addr.
 *
 * The loop re-reads LINEADDR after triggering the task and retries when it
 * no longer matches the value written.
 * NOTE(review): presumably this guards against LINEADDR being overwritten
 * by a concurrent user before the task latched it — confirm against the
 * peripheral documentation.
 */
static inline void _cache_line(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, uintptr_t line_addr)
{
	do {
		/* Wait for any previous task before touching LINEADDR. */
		wait_for_cache(cache);

		nrf_cache_lineaddr_set(cache, line_addr);

		/* Complete outstanding memory accesses before the task runs. */
		barrier_dsync_fence_full();

		switch (op) {

#if NRF_CACHE_HAS_TASK_CLEAN
		case K_NRF_CACHE_CLEAN:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANLINE);
			break;
#endif

		case K_NRF_CACHE_INVD:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATELINE);
			break;

#if NRF_CACHE_HAS_TASK_FLUSH
		case K_NRF_CACHE_FLUSH:
			nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHLINE);
			break;
#endif

		default:
			break;
		}
	} while (nrf_cache_lineaddr_get(cache) != line_addr);
}
126
/*
 * Apply the maintenance operation @p op to every cache line overlapping
 * [addr, addr + size), then wait for completion.
 *
 * Caller guarantees addr != NULL and size != 0 (see _cache_checks()).
 * Always returns 0.
 *
 * NOTE(review): this helper is also reached for ICACHE ranges (via
 * _cache_checks()), yet alignment uses CONFIG_DCACHE_LINE_SIZE — confirm
 * both caches share the same line size on the affected SoCs.
 */
static inline int _cache_range(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr,
			       size_t size)
{
	uintptr_t line_addr = (uintptr_t)addr;
	uintptr_t end_addr;

	/* Some SoCs have a bug that requires setting bit 28 of the address
	 * on TrustZone secure builds.
	 */
	if (IS_ENABLED(CONFIG_CACHE_NRF_PATCH_LINEADDR) &&
	    !IS_ENABLED(CONFIG_TRUSTED_EXECUTION_NONSECURE)) {
		line_addr |= BIT(28);
	}

	/* Computed after the bit-28 patch so both bounds are in the same
	 * (patched) address space.
	 */
	end_addr = line_addr + size;

	/*
	 * Align address to line size
	 */
	line_addr &= ~(CONFIG_DCACHE_LINE_SIZE - 1);

	/* do/while: a range smaller than one line still touches its line. */
	do {
		_cache_line(cache, op, line_addr);
		line_addr += CONFIG_DCACHE_LINE_SIZE;
	} while (line_addr < end_addr);

	wait_for_cache(cache);

	return 0;
}
157
/*
 * Common validation for cache maintenance requests.
 *
 * Returns -EAGAIN when the cache peripheral is disabled, -EINVAL for a
 * range request with a NULL address or zero size, otherwise the result
 * of the whole-cache or range helper.
 */
static inline int _cache_checks(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr,
				size_t size, bool is_range)
{
	/* Maintenance tasks are only meaningful while the cache is on. */
	if (!nrf_cache_enable_check(cache)) {
		return -EAGAIN;
	}

	if (is_range) {
		/* Range requests need a usable address and a non-empty size. */
		if ((addr == NULL) || (size == 0U)) {
			return -EINVAL;
		}

		return _cache_range(cache, op, addr, size);
	}

	return _cache_all(cache, op);
}
177 #else
/*
 * Whole-cache variant of the request validation, used when the peripheral
 * has no per-line addressing: fail with -EAGAIN while the cache is
 * disabled, otherwise run the operation on the entire cache.
 */
static inline int _cache_all_checks(NRF_CACHE_Type *cache, enum k_nrf_cache_op op)
{
	return nrf_cache_enable_check(cache) ? _cache_all(cache, op) : -EAGAIN;
}
186 #endif /* NRF_CACHE_HAS_LINEADDR */
187
188 #if defined(NRF_DCACHE) && NRF_CACHE_HAS_TASKS
189
/* Enable the data cache peripheral. */
void cache_data_enable(void)
{
	nrf_cache_enable(NRF_DCACHE);
}

/* Write all dirty data cache lines back to memory (Zephyr "flush"). */
int cache_data_flush_all(void)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}

/* Disable the data cache. Dirty lines are written back first (when the
 * cache is currently enabled) so no buffered data is lost.
 */
void cache_data_disable(void)
{
	if (nrf_cache_enable_check(NRF_DCACHE)) {
		(void)cache_data_flush_all();
	}
	nrf_cache_disable(NRF_DCACHE);
}

/* Whole-cache invalidation would silently drop dirty lines, so it is
 * deliberately unsupported.
 */
int cache_data_invd_all(void)
{
	/* We really do not want to invalidate the whole data cache. */
	return -ENOTSUP;
}

/* Clean and then invalidate the entire data cache. */
int cache_data_flush_and_invd_all(void)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, NULL, 0, false);
#else
	return -ENOTSUP;
#endif
}

/* Write back the data cache lines covering [addr, addr + size). */
int cache_data_flush_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_CLEAN
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, addr, size, true);
#else
	return -ENOTSUP;
#endif
}

/* Invalidate the data cache lines covering [addr, addr + size). */
int cache_data_invd_range(void *addr, size_t size)
{
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_INVD, addr, size, true);
}

/* Clean and then invalidate the data cache lines covering
 * [addr, addr + size).
 */
int cache_data_flush_and_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_FLUSH
	return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
249
250 #else
251
/*
 * Stub implementations used when no data cache peripheral with task
 * support is present: enable/disable are no-ops and every maintenance
 * request reports -ENOTSUP.
 */

/* No data cache present: enabling is a no-op. */
void cache_data_enable(void)
{
	/* Nothing */
}

/* No data cache present: disabling is a no-op. */
void cache_data_disable(void)
{
	/* Nothing */
}

/* Flushing is not supported without a data cache. */
int cache_data_flush_all(void)
{
	return -ENOTSUP;
}

/* Invalidation is not supported without a data cache. */
int cache_data_invd_all(void)
{
	return -ENOTSUP;
}

/* Flush-and-invalidate is not supported without a data cache. */
int cache_data_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

/* Range flush is not supported without a data cache. */
int cache_data_flush_range(void *addr, size_t size)
{
	/* Silence -Wunused-parameter in this stub. */
	(void)addr;
	(void)size;

	return -ENOTSUP;
}

/* Range invalidation is not supported without a data cache. */
int cache_data_invd_range(void *addr, size_t size)
{
	(void)addr;
	(void)size;

	return -ENOTSUP;
}

/* Range flush-and-invalidate is not supported without a data cache. */
int cache_data_flush_and_invd_range(void *addr, size_t size)
{
	(void)addr;
	(void)size;

	return -ENOTSUP;
}
291
292 #endif /* NRF_DCACHE */
293
294 #if defined(NRF_ICACHE) && NRF_CACHE_HAS_TASKS
295
/* Enable the instruction cache peripheral. */
void cache_instr_enable(void)
{
	nrf_cache_enable(NRF_ICACHE);
}

/* Disable the instruction cache. Unlike the data cache, no write-back is
 * performed before disabling.
 */
void cache_instr_disable(void)
{
	nrf_cache_disable(NRF_ICACHE);
}

/* Trigger the CLEAN task on the whole instruction cache (Zephyr "flush"). */
int cache_instr_flush_all(void)
{
#if NRF_CACHE_HAS_TASK_CLEAN
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, NULL, 0, false);
#else
	return _cache_all_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN);
#endif
#else
	return -ENOTSUP;
#endif
}

/* Invalidate the whole instruction cache. */
int cache_instr_invd_all(void)
{
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, NULL, 0, false);
#else
	return _cache_all_checks(NRF_ICACHE, K_NRF_CACHE_INVD);
#endif
}

/* Clean and then invalidate the whole instruction cache. */
int cache_instr_flush_and_invd_all(void)
{
#if NRF_CACHE_HAS_TASK_FLUSH
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, NULL, 0, false);
#else
	return _cache_all_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH);
#endif
#else
	return -ENOTSUP;
#endif
}

/* Clean the instruction cache lines covering [addr, addr + size);
 * requires per-line addressing support.
 */
int cache_instr_flush_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_CLEAN && NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, addr, size, true);
#else
	return -ENOTSUP;
#endif
}

/* Invalidate the instruction cache lines covering [addr, addr + size). */
int cache_instr_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, addr, size, true);
#else
	return -ENOTSUP;
#endif
}

/* Clean and then invalidate the instruction cache lines covering
 * [addr, addr + size).
 */
int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
#if NRF_CACHE_HAS_TASK_FLUSH && NRF_CACHE_HAS_LINEADDR
	return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, addr, size, true);
#else
	return -ENOTSUP;
#endif
}
367
368 #else
369
/*
 * Stub implementations used when no instruction cache peripheral with
 * task support is present: enable/disable are no-ops and every
 * maintenance request reports -ENOTSUP.
 */

/* No instruction cache present: enabling is a no-op. */
void cache_instr_enable(void)
{
	/* Nothing */
}

/* No instruction cache present: disabling is a no-op. */
void cache_instr_disable(void)
{
	/* Nothing */
}

/* Flushing is not supported without an instruction cache. */
int cache_instr_flush_all(void)
{
	return -ENOTSUP;
}

/* Invalidation is not supported without an instruction cache. */
int cache_instr_invd_all(void)
{
	return -ENOTSUP;
}

/* Flush-and-invalidate is not supported without an instruction cache. */
int cache_instr_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

/* Range flush is not supported without an instruction cache. */
int cache_instr_flush_range(void *addr, size_t size)
{
	/* Silence -Wunused-parameter in this stub. */
	(void)addr;
	(void)size;

	return -ENOTSUP;
}

/* Range invalidation is not supported without an instruction cache. */
int cache_instr_invd_range(void *addr, size_t size)
{
	(void)addr;
	(void)size;

	return -ENOTSUP;
}

/* Range flush-and-invalidate is not supported without an instruction cache. */
int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
	(void)addr;
	(void)size;

	return -ENOTSUP;
}
409
410 #endif /* NRF_ICACHE */
411