/*
 * Copyright 2016-2021 NXP
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "fsl_cache.h"
/*******************************************************************************
 * Definitions
 ******************************************************************************/

/* Component ID definition, used by tools. */
#ifndef FSL_COMPONENT_ID
#define FSL_COMPONENT_ID "platform.drivers.cache_cache64"
#endif

#if (FSL_FEATURE_SOC_CACHE64_CTRL_COUNT > 0)
/*******************************************************************************
 * Variables
 ******************************************************************************/
/* Array of CACHE64_CTRL peripheral base addresses. */
static CACHE64_CTRL_Type *const s_cache64ctrlBases[] = CACHE64_CTRL_BASE_PTRS;

#if (defined(FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT) && (FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT > 0))
/* Array of CACHE64_POLSEL peripheral base addresses. */
static CACHE64_POLSEL_Type *const s_cache64polselBases[] = CACHE64_POLSEL_BASE_PTRS;
#endif

/* Array of CACHE64 physical memory base addresses. */
static uint32_t const s_cache64PhymemBases[] = CACHE64_CTRL_PHYMEM_BASES;
/* Array of CACHE64 physical memory sizes. */
static uint32_t const s_cache64PhymemSizes[] = CACHE64_CTRL_PHYMEM_SIZES;

#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
#ifdef CACHE64_CLOCKS
/* Array of CACHE64_CTRL clock names. */
static const clock_ip_name_t s_cache64Clocks[] = CACHE64_CLOCKS;
#endif
#endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */

/*******************************************************************************
 * Code
 ******************************************************************************/
#if (defined(FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT) && (FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT > 0))
/*!
 * brief Returns the instance number for a given peripheral base address.
 *
 * param base The peripheral base address.
 * return CACHE64_POLSEL instance number starting from 0.
 */
uint32_t CACHE64_GetInstance(CACHE64_POLSEL_Type *base)
{
    uint32_t i;

    for (i = 0; i < ARRAY_SIZE(s_cache64polselBases); i++)
    {
        if (base == s_cache64polselBases[i])
        {
            break;
        }
    }

    assert(i < ARRAY_SIZE(s_cache64polselBases));

    return i;
}
#endif

/*!
 * brief Returns the instance number for a given physical memory address.
 *
 * param address The physical memory address.
 * return CACHE64_CTRL instance number starting from 0.
 */
uint32_t CACHE64_GetInstanceByAddr(uint32_t address)
{
    uint32_t i;

    for (i = 0; i < ARRAY_SIZE(s_cache64ctrlBases); i++)
    {
        /* Each CACHE64 instance covers the physical window [base, base + size - 1]. */
        if ((address >= s_cache64PhymemBases[i]) &&
            (address <= (s_cache64PhymemBases[i] + s_cache64PhymemSizes[i] - 0x01U)))
        {
            break;
        }
    }

    return i;
}

#if (defined(FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT) && (FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT > 0))
/*!
 * @brief Initializes a CACHE64 instance with the user configuration structure.
 *
 * This function configures the CACHE64 module with user-defined settings. Call the CACHE64_GetDefaultConfig()
 * function first to fill the configuration structure with default values.
 *
 * @param base CACHE64_POLSEL peripheral base address.
 * @param config Pointer to a user-defined configuration structure.
 * @retval kStatus_Success CACHE64 initialization succeeded.
 */
status_t CACHE64_Init(CACHE64_POLSEL_Type *base, const cache64_config_t *config)
{
    volatile uint32_t *topReg = &base->REG0_TOP;
    uint32_t i;
    uint32_t polsel = 0;

#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
#ifdef CACHE64_CLOCKS
    uint32_t instance = CACHE64_GetInstance(base);

    /* Enable CACHE64 clock */
    CLOCK_EnableClock(s_cache64Clocks[instance]);
#endif
#endif

    /* Program the upper boundary of every region except the last one. */
    for (i = 0; i < CACHE64_REGION_NUM - 1U; i++)
    {
        assert((config->boundaryAddr[i] & (CACHE64_REGION_ALIGNMENT - 1U)) == 0U);
        ((volatile uint32_t *)topReg)[i] = config->boundaryAddr[i] >= CACHE64_REGION_ALIGNMENT ?
                                               config->boundaryAddr[i] - CACHE64_REGION_ALIGNMENT :
                                               0U;
    }

    /* Pack the per-region policy selections into POLSEL (2 bits per region). */
    for (i = 0; i < CACHE64_REGION_NUM; i++)
    {
        polsel |= (((uint32_t)config->policy[i]) << (2U * i));
    }
    base->POLSEL = polsel;

    return kStatus_Success;
}

/*!
 * @brief Gets the default configuration structure.
 *
 * This function initializes the CACHE64 configuration structure to default values: the first
 * region covers the whole cacheable area and its policy is set to write-back.
 *
 * @param config Pointer to a configuration structure.
 */
void CACHE64_GetDefaultConfig(cache64_config_t *config)
{
    (void)memset(config, 0, sizeof(cache64_config_t));

    config->boundaryAddr[0] = s_cache64PhymemSizes[0];
    config->policy[0]       = kCACHE64_PolicyWriteBack;
}
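
/*
 * Illustrative usage sketch (not part of the driver): a typical bring-up sequence takes
 * the default configuration, applies it, and then enables the cache. The instance names
 * CACHE64_POLSEL0 and CACHE64_CTRL0 are assumptions; use the macros from your device
 * header.
 *
 *   cache64_config_t cacheConfig;
 *
 *   CACHE64_GetDefaultConfig(&cacheConfig);
 *   (void)CACHE64_Init(CACHE64_POLSEL0, &cacheConfig);
 *   CACHE64_EnableCache(CACHE64_CTRL0);
 */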
#endif

/*!
 * brief Enables the cache.
 *
 */
void CACHE64_EnableCache(CACHE64_CTRL_Type *base)
{
    /* if CACHE is not enabled */
    if ((base->CCR & CACHE64_CTRL_CCR_ENCACHE_MASK) == 0x00U)
    {
        /* First, invalidate the entire cache. */
        CACHE64_InvalidateCache(base);

        /* Now enable the cache. */
        base->CCR |= CACHE64_CTRL_CCR_ENCACHE_MASK;
    }
}

/*!
 * brief Disables the cache.
 *
 */
void CACHE64_DisableCache(CACHE64_CTRL_Type *base)
{
    /* if CACHE is enabled */
    if ((base->CCR & CACHE64_CTRL_CCR_ENCACHE_MASK) != 0x00U)
    {
        /* First, push any modified contents. */
        CACHE64_CleanCache(base);

        /* Now disable the cache. */
        base->CCR &= ~CACHE64_CTRL_CCR_ENCACHE_MASK;
    }
}

/*!
 * brief Invalidates the cache.
 *
 */
void CACHE64_InvalidateCache(CACHE64_CTRL_Type *base)
{
    /* Invalidate all lines in both ways and initiate the cache command. */
    base->CCR |= CACHE64_CTRL_CCR_INVW0_MASK | CACHE64_CTRL_CCR_INVW1_MASK | CACHE64_CTRL_CCR_GO_MASK;

    /* Wait until the cache command completes. */
    while ((base->CCR & CACHE64_CTRL_CCR_GO_MASK) != 0x00U)
    {
    }

    /* As a precaution clear the bits to avoid inadvertently re-running this command. */
    base->CCR &= ~(CACHE64_CTRL_CCR_INVW0_MASK | CACHE64_CTRL_CCR_INVW1_MASK);
}
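
/*
 * Illustrative usage sketch (not part of the driver): a whole-cache invalidate is useful
 * when the backing memory has changed behind the cache's back, for example after
 * reprogramming an external flash mapped through this controller. CACHE64_CTRL0 is an
 * assumed device-specific instance name.
 *
 *   CACHE64_InvalidateCache(CACHE64_CTRL0);
 */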

/*!
 * brief Invalidates cache by range.
 *
 * param address The physical address of the memory to be invalidated.
 * param size_byte Size of the memory to be invalidated.
 * note Address and size should be aligned to "CACHE64_LINESIZE_BYTE".
 * The startAddr here will be forced to align to CACHE64_LINESIZE_BYTE if
 * startAddr is not aligned. For size_byte, the application should either ensure the
 * alignment or take care of the operation order when it is not aligned.
 */
void CACHE64_InvalidateCacheByRange(uint32_t address, uint32_t size_byte)
{
    uint32_t endAddr = address + size_byte - 0x01U;
    uint32_t pccReg  = 0;
    /* Align address to cache line size. */
    uint32_t startAddr = address & ~((uint32_t)CACHE64_LINESIZE_BYTE - 1U);
    uint32_t instance  = CACHE64_GetInstanceByAddr(address);
    uint32_t endLim;
    CACHE64_CTRL_Type *base;

    if (instance >= ARRAY_SIZE(s_cache64ctrlBases))
    {
        return;
    }
    base    = s_cache64ctrlBases[instance];
    endLim  = s_cache64PhymemBases[instance] + s_cache64PhymemSizes[instance] - 0x01U;
    endAddr = endAddr > endLim ? endLim : endAddr;

    /* Set the invalidate by line command and use the physical address. */
    pccReg = (base->CLCR & ~CACHE64_CTRL_CLCR_LCMD_MASK) | CACHE64_CTRL_CLCR_LCMD(1) | CACHE64_CTRL_CLCR_LADSEL_MASK;
    base->CLCR = pccReg;

    while (startAddr < endAddr)
    {
        /* Set the address and initiate the command. */
        base->CSAR = (startAddr & CACHE64_CTRL_CSAR_PHYADDR_MASK) | CACHE64_CTRL_CSAR_LGO_MASK;

        /* Wait until the cache command completes. */
        while ((base->CSAR & CACHE64_CTRL_CSAR_LGO_MASK) != 0x00U)
        {
        }
        startAddr += (uint32_t)CACHE64_LINESIZE_BYTE;
    }
}
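
/*
 * Illustrative usage sketch (not part of the driver): after a DMA engine writes into a
 * buffer located in the cacheable region, discard the now-stale cache lines covering the
 * buffer before the CPU reads it. The buffer below is an assumption for the example.
 *
 *   uint8_t rxBuffer[256];
 *
 *   // ... DMA transfer into rxBuffer has completed ...
 *   CACHE64_InvalidateCacheByRange((uint32_t)rxBuffer, sizeof(rxBuffer));
 *   // rxBuffer can now be read coherently by the CPU.
 */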

/*!
 * brief Cleans the cache.
 *
 */
void CACHE64_CleanCache(CACHE64_CTRL_Type *base)
{
    /* Push all modified lines in both ways and initiate the cache command. */
    base->CCR |= CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK | CACHE64_CTRL_CCR_GO_MASK;

    /* Wait until the cache command completes. */
    while ((base->CCR & CACHE64_CTRL_CCR_GO_MASK) != 0x00U)
    {
    }

    /* As a precaution clear the bits to avoid inadvertently re-running this command. */
    base->CCR &= ~(CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK);
}

/*!
 * brief Cleans cache by range.
 *
 * param address The physical address of the memory to be cleaned.
 * param size_byte Size of the memory to be cleaned.
 * note Address and size should be aligned to "CACHE64_LINESIZE_BYTE".
 * The startAddr here will be forced to align to CACHE64_LINESIZE_BYTE if
 * startAddr is not aligned. For size_byte, the application should either ensure the
 * alignment or take care of the operation order when it is not aligned.
 */
void CACHE64_CleanCacheByRange(uint32_t address, uint32_t size_byte)
{
    uint32_t endAddr = address + size_byte - 0x01U;
    uint32_t pccReg  = 0;
    /* Align address to cache line size. */
    uint32_t startAddr = address & ~((uint32_t)CACHE64_LINESIZE_BYTE - 1U);
    uint32_t instance  = CACHE64_GetInstanceByAddr(address);
    uint32_t endLim;
    CACHE64_CTRL_Type *base;

    if (instance >= ARRAY_SIZE(s_cache64ctrlBases))
    {
        return;
    }
    base    = s_cache64ctrlBases[instance];
    endLim  = s_cache64PhymemBases[instance] + s_cache64PhymemSizes[instance] - 0x01U;
    endAddr = endAddr > endLim ? endLim : endAddr;

    /* Set the push by line command. */
    pccReg = (base->CLCR & ~CACHE64_CTRL_CLCR_LCMD_MASK) | CACHE64_CTRL_CLCR_LCMD(2) | CACHE64_CTRL_CLCR_LADSEL_MASK;
    base->CLCR = pccReg;

    while (startAddr < endAddr)
    {
        /* Set the address and initiate the command. */
        base->CSAR = (startAddr & CACHE64_CTRL_CSAR_PHYADDR_MASK) | CACHE64_CTRL_CSAR_LGO_MASK;

        /* Wait until the cache command completes. */
        while ((base->CSAR & CACHE64_CTRL_CSAR_LGO_MASK) != 0x00U)
        {
        }
        startAddr += (uint32_t)CACHE64_LINESIZE_BYTE;
    }
}
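
/*
 * Illustrative usage sketch (not part of the driver): before handing a CPU-prepared
 * buffer in the cacheable region to a DMA engine, push any dirty cache lines covering it
 * back to memory. The buffer below is an assumption for the example.
 *
 *   uint8_t txBuffer[256];
 *
 *   // CPU fills txBuffer ...
 *   CACHE64_CleanCacheByRange((uint32_t)txBuffer, sizeof(txBuffer));
 *   // ... now start the DMA transfer that reads from txBuffer.
 */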

/*!
 * brief Cleans and invalidates the cache.
 *
 */
void CACHE64_CleanInvalidateCache(CACHE64_CTRL_Type *base)
{
    /* Push and invalidate all. */
    base->CCR |= CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK | CACHE64_CTRL_CCR_INVW0_MASK |
                 CACHE64_CTRL_CCR_INVW1_MASK | CACHE64_CTRL_CCR_GO_MASK;

    /* Wait until the cache command completes. */
    while ((base->CCR & CACHE64_CTRL_CCR_GO_MASK) != 0x00U)
    {
    }

    /* As a precaution clear the bits to avoid inadvertently re-running this command. */
    base->CCR &= ~(CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK | CACHE64_CTRL_CCR_INVW0_MASK |
                   CACHE64_CTRL_CCR_INVW1_MASK);
}

/*!
 * brief Cleans and invalidates cache by range.
 *
 * param address The physical address of the memory to be cleaned and invalidated.
 * param size_byte Size of the memory to be cleaned and invalidated.
 * note Address and size should be aligned to "CACHE64_LINESIZE_BYTE".
 * The startAddr here will be forced to align to CACHE64_LINESIZE_BYTE if
 * startAddr is not aligned. For size_byte, the application should either ensure the
 * alignment or take care of the operation order when it is not aligned.
 */
void CACHE64_CleanInvalidateCacheByRange(uint32_t address, uint32_t size_byte)
{
    uint32_t endAddr = address + size_byte - 0x01U;
    uint32_t pccReg  = 0;
    /* Align address to cache line size. */
    uint32_t startAddr = address & ~((uint32_t)CACHE64_LINESIZE_BYTE - 1U);
    uint32_t instance  = CACHE64_GetInstanceByAddr(address);
    uint32_t endLim;
    CACHE64_CTRL_Type *base;

    if (instance >= ARRAY_SIZE(s_cache64ctrlBases))
    {
        return;
    }
    base    = s_cache64ctrlBases[instance];
    endLim  = s_cache64PhymemBases[instance] + s_cache64PhymemSizes[instance] - 0x01U;
    endAddr = endAddr > endLim ? endLim : endAddr;

    /* Set the push & invalidate by line command. */
    pccReg = (base->CLCR & ~CACHE64_CTRL_CLCR_LCMD_MASK) | CACHE64_CTRL_CLCR_LCMD(3) | CACHE64_CTRL_CLCR_LADSEL_MASK;
    base->CLCR = pccReg;

    while (startAddr < endAddr)
    {
        /* Set the address and initiate the command. */
        base->CSAR = (startAddr & CACHE64_CTRL_CSAR_PHYADDR_MASK) | CACHE64_CTRL_CSAR_LGO_MASK;

        /* Wait until the cache command completes. */
        while ((base->CSAR & CACHE64_CTRL_CSAR_LGO_MASK) != 0x00U)
        {
        }
        startAddr += (uint32_t)CACHE64_LINESIZE_BYTE;
    }
}
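
/*
 * Illustrative usage sketch (not part of the driver): for memory that is both written by
 * the CPU and then updated by another master (for example a shared descriptor ring), the
 * combined operation pushes dirty lines and drops them in one pass. The descriptor type
 * and array below are assumptions for the example.
 *
 *   dma_descriptor_t ring[8];
 *
 *   // CPU updates ring entries ...
 *   CACHE64_CleanInvalidateCacheByRange((uint32_t)ring, sizeof(ring));
 *   // Both the CPU and the other master now observe a consistent view of the ring.
 */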

#if !(defined(FSL_FEATURE_CACHE64_CTRL_HAS_NO_WRITE_BUF) && FSL_FEATURE_CACHE64_CTRL_HAS_NO_WRITE_BUF)
/*!
 * brief Enables or disables the cache write buffer.
 *
 */
void CACHE64_EnableWriteBuffer(CACHE64_CTRL_Type *base, bool enable)
{
    if (enable)
    {
        base->CCR |= CACHE64_CTRL_CCR_ENWRBUF_MASK;
    }
    else
    {
        base->CCR &= ~CACHE64_CTRL_CCR_ENWRBUF_MASK;
    }
}
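
/*
 * Illustrative usage sketch (not part of the driver): the write buffer is normally left
 * enabled for throughput and only disabled when strictly ordered writes to the cached
 * region are required. CACHE64_CTRL0 is an assumed device-specific instance name.
 *
 *   CACHE64_EnableWriteBuffer(CACHE64_CTRL0, true);
 */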

#endif

#endif /* FSL_FEATURE_SOC_CACHE64_CTRL_COUNT > 0 */