/*
 * Copyright 2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Cortex-A/R AArch32 L1-cache maintenance operations.
 *
 * This module implements the cache API for Cortex-A/R AArch32 cores using
 * CMSIS. Only L1-cache maintenance operations are supported.
 */
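
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * callers normally reach these arch_* functions through the generic
 * zephyr/cache.h wrappers, e.g. around a DMA transfer:
 *
 *   sys_cache_data_flush_range(tx_buf, tx_len); // make CPU writes visible
 *   ... start the DMA transfer and wait for completion ...
 *   sys_cache_data_invd_range(rx_buf, rx_len);  // drop stale lines before
 *                                               // the CPU reads rx_buf
 */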

#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <cmsis_core.h>
#include <zephyr/sys/barrier.h>

/* Cache Type Register */
#define CTR_DMINLINE_SHIFT 16
#define CTR_DMINLINE_MASK BIT_MASK(4)

#ifdef CONFIG_DCACHE

static size_t dcache_line_size;

/**
 * @brief Get the smallest D-cache line size.
 *
 * Get the smallest D-cache line size of all the data and unified caches that
 * the processor controls.
 */
size_t arch_dcache_line_size_get(void)
{
	uint32_t val;
	uint32_t dminline;

	if (!dcache_line_size) {
		val = read_sysreg(ctr);
		dminline = (val >> CTR_DMINLINE_SHIFT) & CTR_DMINLINE_MASK;
		/*
		 * CTR.DminLine is the log2 of the number of 4-byte words in
		 * the smallest cache line, so the size in bytes is
		 * (1 << dminline) * 4.
		 */
		dcache_line_size = 4 << dminline;
	}

	return dcache_line_size;
}
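
/*
 * Worked example (hypothetical core with 64-byte cache lines): CTR.DminLine
 * reads as 4, i.e. log2(16) words; 16 words * 4 bytes/word = 64 bytes,
 * matching 4 << 4 in arch_dcache_line_size_get() above.
 */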

void arch_dcache_enable(void)
{
	uint32_t val;

	/*
	 * Invalidate all D-cache contents first so that stale lines (e.g.
	 * left over from before reset) cannot be hit once the cache is
	 * enabled.
	 */
	arch_dcache_invd_all();

	val = __get_SCTLR();
	val |= SCTLR_C_Msk;
	barrier_dsync_fence_full();
	__set_SCTLR(val);
	barrier_isync_fence_full();
}

void arch_dcache_disable(void)
{
	uint32_t val;

	/*
	 * Clear the C bit first so that no new lines are allocated while
	 * the dirty contents are written back below.
	 */
	val = __get_SCTLR();
	val &= ~SCTLR_C_Msk;
	barrier_dsync_fence_full();
	__set_SCTLR(val);
	barrier_isync_fence_full();

	arch_dcache_flush_and_invd_all();
}
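
/*
 * Terminology note: in the Zephyr cache API, "flush" corresponds to the ARM
 * "clean" operation (write dirty lines back to memory), "invd" to
 * "invalidate" (discard lines without write-back), and "flush_and_invd" to
 * "clean and invalidate".
 */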

int arch_dcache_flush_all(void)
{
	L1C_CleanDCacheAll();

	return 0;
}

int arch_dcache_invd_all(void)
{
	L1C_InvalidateDCacheAll();

	return 0;
}

int arch_dcache_flush_and_invd_all(void)
{
	L1C_CleanInvalidateDCacheAll();

	return 0;
}

int arch_dcache_flush_range(void *start_addr, size_t size)
{
	size_t line_size;
	uintptr_t addr = (uintptr_t)start_addr;
	uintptr_t end_addr = addr + size;

	/*
	 * Align the address down to a line boundary. A clean may therefore
	 * also write back neighbouring data sharing the first line, which is
	 * harmless, unlike a pure invalidate (see arch_dcache_invd_range()
	 * below).
	 */
	line_size = arch_dcache_line_size_get();
	addr &= ~(line_size - 1);

	while (addr < end_addr) {
		L1C_CleanDCacheMVA((void *)addr);
		addr += line_size;
	}

	return 0;
}

int arch_dcache_invd_range(void *start_addr, size_t size)
{
	size_t line_size;
	uintptr_t addr = (uintptr_t)start_addr;
	uintptr_t end_addr = addr + size;

	line_size = arch_dcache_line_size_get();

	/*
	 * The partial cache lines at both ends of the range may hold
	 * unrelated (possibly dirty) data, so clean and invalidate them
	 * instead of just invalidating, to prevent data corruption.
	 */
	if (end_addr & (line_size - 1)) {
		end_addr &= ~(line_size - 1);
		L1C_CleanInvalidateDCacheMVA((void *)end_addr);
	}

	if (addr & (line_size - 1)) {
		addr &= ~(line_size - 1);
		if (addr == end_addr) {
			goto done;
		}
		L1C_CleanInvalidateDCacheMVA((void *)addr);
		addr += line_size;
	}

	/* Align address to line size */
	addr &= ~(line_size - 1);

	while (addr < end_addr) {
		L1C_InvalidateDCacheMVA((void *)addr);
		addr += line_size;
	}

done:
	return 0;
}
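
/*
 * Worked example for the partial-line handling above (hypothetical 32-byte
 * lines): invalidating [0x1010, 0x1070) touches lines 0x1000, 0x1020, 0x1040
 * and 0x1060. Lines 0x1000 and 0x1060 are only partially covered and are
 * cleaned and invalidated; the fully covered lines 0x1020 and 0x1040 are
 * invalidated outright.
 */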

int arch_dcache_flush_and_invd_range(void *start_addr, size_t size)
{
	size_t line_size;
	uintptr_t addr = (uintptr_t)start_addr;
	uintptr_t end_addr = addr + size;

	/* Align address to line size */
	line_size = arch_dcache_line_size_get();
	addr &= ~(line_size - 1);

	while (addr < end_addr) {
		L1C_CleanInvalidateDCacheMVA((void *)addr);
		addr += line_size;
	}

	return 0;
}

#endif /* CONFIG_DCACHE */

#ifdef CONFIG_ICACHE

void arch_icache_enable(void)
{
	/* Invalidate stale contents before the I-cache starts being used */
	arch_icache_invd_all();
	__set_SCTLR(__get_SCTLR() | SCTLR_I_Msk);
	barrier_isync_fence_full();
}

void arch_icache_disable(void)
{
	__set_SCTLR(__get_SCTLR() & ~SCTLR_I_Msk);
	barrier_isync_fence_full();
}

/*
 * The instruction cache can never hold dirty data, so there is nothing to
 * write back: "flush" (clean) operations are not applicable and report
 * -ENOTSUP.
 */
int arch_icache_flush_all(void)
{
	return -ENOTSUP;
}

int arch_icache_invd_all(void)
{
	L1C_InvalidateICacheAll();

	return 0;
}

int arch_icache_flush_and_invd_all(void)
{
	return -ENOTSUP;
}

int arch_icache_flush_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}

int arch_icache_invd_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}

int arch_icache_flush_and_invd_range(void *start_addr, size_t size)
{
	return -ENOTSUP;
}

#endif /* CONFIG_ICACHE */

void arch_cache_init(void)
{
}