/*
 * Copyright 2016-2021, 2023-2024 NXP
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "fsl_cache.h"
/*******************************************************************************
 * Definitions
 ******************************************************************************/

/* Component ID definition, used by tools. */
#ifndef FSL_COMPONENT_ID
#define FSL_COMPONENT_ID "platform.drivers.cache_cache64"
#endif

#if (FSL_FEATURE_SOC_CACHE64_CTRL_COUNT > 0)
/*******************************************************************************
 * Variables
 ******************************************************************************/
/* Array of CACHE64_CTRL peripheral base addresses. */
static CACHE64_CTRL_Type *const s_cache64ctrlBases[] = CACHE64_CTRL_BASE_PTRS;

#if (defined(FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT) && (FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT > 0))
/* Array of CACHE64_POLSEL peripheral base addresses. */
static CACHE64_POLSEL_Type *const s_cache64polselBases[] = CACHE64_POLSEL_BASE_PTRS;
#endif

#if (defined(CACHE64_CTRL_PHYMEM_BASE_ALIAS_COUNT))
#define CACHE64_PHYMEM_COLUM_COUNT CACHE64_CTRL_PHYMEM_BASE_ALIAS_COUNT
/* 2D array of CACHE64 physical memory base addresses:
   rows index the cache instance, columns index the aliases of that instance. */
static uint32_t const s_cache64PhymemBases[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT] =
    CACHE64_CTRL_PHYMEM_BASES;
/* 2D array of CACHE64 physical memory sizes:
   rows index the cache instance, columns index the aliases of that instance. */
static uint32_t const s_cache64PhymemSizes[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT] =
    CACHE64_CTRL_PHYMEM_SIZES;
#else
#define CACHE64_PHYMEM_COLUM_COUNT 1
static uint32_t const s_cache64PhymemBases[] = CACHE64_CTRL_PHYMEM_BASES;
static uint32_t const s_cache64PhymemSizes[] = CACHE64_CTRL_PHYMEM_SIZES;
#endif

#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
#ifdef CACHE64_CLOCKS
/* Array of CACHE64_CTRL clock names. */
static const clock_ip_name_t s_cache64Clocks[] = CACHE64_CLOCKS;
#endif
#endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */

/* Alias index of the physical memory region matched by the most recent CACHE64_GetInstanceByAddr() call. */
volatile uint8_t g_cache64MemPhyAliasId = 0U;

/*******************************************************************************
 * Code
 ******************************************************************************/
#if (defined(FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT) && (FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT > 0))
/*!
 * brief Returns an instance number given a peripheral base address.
 *
 * param base The peripheral base address.
 * return CACHE64_POLSEL instance number starting from 0.
 */
uint32_t CACHE64_GetInstance(CACHE64_POLSEL_Type *base)
{
    uint32_t i;

    for (i = 0; i < ARRAY_SIZE(s_cache64polselBases); i++)
    {
        if (MSDK_REG_SECURE_ADDR(base) == MSDK_REG_SECURE_ADDR(s_cache64polselBases[i]))
        {
            break;
        }
    }

    assert(i < ARRAY_SIZE(s_cache64polselBases));

    return i;
}
#endif

/*!
 * brief Returns an instance number given a physical memory address.
 *
 * param address The physical memory address.
 * return CACHE64_CTRL instance number starting from 0, or 0xFFFFFFFF when the address is not
 * covered by any CACHE64 instance.
 */
uint32_t CACHE64_GetInstanceByAddr(uint32_t address)
{
    uint32_t i = 0UL;
    uint32_t phyMemBase[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
    uint32_t phyMemSize[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
    memcpy(phyMemBase, s_cache64PhymemBases, sizeof(s_cache64PhymemBases));
    memcpy(phyMemSize, s_cache64PhymemSizes, sizeof(s_cache64PhymemSizes));

    while (i < ARRAY_SIZE(s_cache64ctrlBases))
    {
        g_cache64MemPhyAliasId = 0U;
        while (g_cache64MemPhyAliasId < CACHE64_PHYMEM_COLUM_COUNT)
        {
            if ((MSDK_REG_SECURE_ADDR(address) >= MSDK_REG_SECURE_ADDR(phyMemBase[i][g_cache64MemPhyAliasId])) &&
                (MSDK_REG_SECURE_ADDR(address) <
                 MSDK_REG_SECURE_ADDR(phyMemBase[i][g_cache64MemPhyAliasId] +
                                      phyMemSize[i][g_cache64MemPhyAliasId] - 0x01U)))
            {
                return i;
            }
            g_cache64MemPhyAliasId++;
        }
        i++;
    }

    return 0xFFFFFFFFUL;
}
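
/*
 * Usage sketch (illustrative only, not part of the driver): resolve the controller that caches a
 * given buffer before issuing a whole-cache command. The rxBuffer symbol is hypothetical;
 * CACHE64_CTRL_BASE_PTRS comes from the device header.
 *
 *   static CACHE64_CTRL_Type *const ctrlBases[] = CACHE64_CTRL_BASE_PTRS;
 *   uint32_t instance = CACHE64_GetInstanceByAddr((uint32_t)&rxBuffer[0]);
 *   if (instance != 0xFFFFFFFFUL)
 *   {
 *       CACHE64_EnableCache(ctrlBases[instance]);
 *   }
 */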

#if (defined(FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT) && (FSL_FEATURE_SOC_CACHE64_POLSEL_COUNT > 0))
/*!
 * @brief Initializes a CACHE64 instance with the user configuration structure.
 *
 * This function configures the CACHE64 module with user-defined settings. Call CACHE64_GetDefaultConfig()
 * to fill the configuration structure with default values.
 *
 * @param base CACHE64_POLSEL peripheral base address.
 * @param config Pointer to a user-defined configuration structure.
 * @retval kStatus_Success CACHE64 initialization succeeded.
 */
status_t CACHE64_Init(CACHE64_POLSEL_Type *base, const cache64_config_t *config)
{
    volatile uint32_t *topReg = &base->REG0_TOP;
    uint32_t i;
    uint32_t polsel = 0;

#if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
#ifdef CACHE64_CLOCKS
    uint32_t instance = CACHE64_GetInstance(base);

    /* Enable CACHE64 clock */
    CLOCK_EnableClock(s_cache64Clocks[instance]);
#endif
#endif

    /* Program the region boundary registers; each boundary must be CACHE64_REGION_ALIGNMENT aligned. */
    for (i = 0; i < CACHE64_REGION_NUM - 1U; i++)
    {
        assert((config->boundaryAddr[i] & (CACHE64_REGION_ALIGNMENT - 1U)) == 0U);
        ((volatile uint32_t *)topReg)[i] = config->boundaryAddr[i] >= CACHE64_REGION_ALIGNMENT ?
                                               config->boundaryAddr[i] - CACHE64_REGION_ALIGNMENT :
                                               0U;
    }

    /* Pack the per-region policies (2 bits per region) into the POLSEL register. */
    for (i = 0; i < CACHE64_REGION_NUM; i++)
    {
        polsel |= (((uint32_t)config->policy[i]) << (2U * i));
    }
    base->POLSEL = polsel;

    return kStatus_Success;
}
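
/*
 * Usage sketch (illustrative only, not part of the driver): fill the configuration with defaults,
 * program the policy registers, then enable the cache. The instance names CACHE64_POLSEL0 and
 * CACHE64_CTRL0 are typical device-header names and are assumptions here.
 *
 *   cache64_config_t config;
 *   CACHE64_GetDefaultConfig(&config);
 *   (void)CACHE64_Init(CACHE64_POLSEL0, &config);
 *   CACHE64_EnableCache(CACHE64_CTRL0);
 */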

/*!
 * @brief Gets the default configuration structure.
 *
 * This function initializes the CACHE64 configuration structure to default values: the first
 * region covers the whole cacheable area and its policy is set to write-back.
 *
 * @param config Pointer to a configuration structure.
 */
void CACHE64_GetDefaultConfig(cache64_config_t *config)
{
    uint32_t phyMemSize[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
    memcpy(phyMemSize, s_cache64PhymemSizes, sizeof(s_cache64PhymemSizes));
    (void)memset(config, 0, sizeof(cache64_config_t));

    config->boundaryAddr[0] = phyMemSize[0][g_cache64MemPhyAliasId];
    config->policy[0]       = kCACHE64_PolicyWriteBack;
}
#endif

/*!
 * brief Enables the cache.
 *
 * param base CACHE64_CTRL peripheral base address.
 */
void CACHE64_EnableCache(CACHE64_CTRL_Type *base)
{
    /* If the cache is not already enabled. */
    if ((base->CCR & CACHE64_CTRL_CCR_ENCACHE_MASK) == 0x00U)
    {
        /* First, invalidate the entire cache. */
        CACHE64_InvalidateCache(base);

        /* Now enable the cache. */
        base->CCR |= CACHE64_CTRL_CCR_ENCACHE_MASK;
    }
}

/*!
 * brief Disables the cache.
 *
 * param base CACHE64_CTRL peripheral base address.
 */
void CACHE64_DisableCache(CACHE64_CTRL_Type *base)
{
    /* If the cache is enabled. */
    if ((base->CCR & CACHE64_CTRL_CCR_ENCACHE_MASK) != 0x00U)
    {
        /* First, push any modified contents. */
        CACHE64_CleanCache(base);

        /* Now disable the cache. */
        base->CCR &= ~CACHE64_CTRL_CCR_ENCACHE_MASK;
    }
}

/*!
 * brief Invalidates the cache.
 *
 * param base CACHE64_CTRL peripheral base address.
 */
void CACHE64_InvalidateCache(CACHE64_CTRL_Type *base)
{
    /* Invalidate all lines in both ways and initiate the cache command. */
    base->CCR |= CACHE64_CTRL_CCR_INVW0_MASK | CACHE64_CTRL_CCR_INVW1_MASK | CACHE64_CTRL_CCR_GO_MASK;

    /* Wait until the cache command completes. */
    while ((base->CCR & CACHE64_CTRL_CCR_GO_MASK) != 0x00U)
    {
    }

    /* As a precaution, clear the bits to avoid inadvertently re-running this command. */
    base->CCR &= ~(CACHE64_CTRL_CCR_INVW0_MASK | CACHE64_CTRL_CCR_INVW1_MASK);
}

/*!
 * brief Invalidates the cache by range.
 *
 * param address The physical address of the cache.
 * param size_byte Size of the memory to be invalidated, should be larger than 0.
 * note Address and size should be aligned to "CACHE64_LINESIZE_BYTE".
 * The startAddr here will be forced to align to CACHE64_LINESIZE_BYTE if
 * startAddr is not aligned. For the size_byte, the application should ensure the
 * alignment or ensure the correct operation order if the size_byte is not aligned.
 */
void CACHE64_InvalidateCacheByRange(uint32_t address, uint32_t size_byte)
{
    if (size_byte > 0UL)
    {
        uint32_t endAddr = MSDK_REG_NONSECURE_ADDR(address + size_byte - 0x01U);
        uint32_t pccReg  = 0;
        /* Align address to cache line size. */
        uint32_t startAddr = MSDK_REG_NONSECURE_ADDR(address & ~((uint32_t)CACHE64_LINESIZE_BYTE - 1U));
        uint32_t instance  = CACHE64_GetInstanceByAddr(address);
        uint32_t endLim;
        CACHE64_CTRL_Type *base;
        uint32_t phyMemBase[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
        uint32_t phyMemSize[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
        memcpy(phyMemBase, s_cache64PhymemBases, sizeof(s_cache64PhymemBases));
        memcpy(phyMemSize, s_cache64PhymemSizes, sizeof(s_cache64PhymemSizes));

        if (instance >= ARRAY_SIZE(s_cache64ctrlBases))
        {
            return;
        }
        base    = s_cache64ctrlBases[instance];
        /* Clamp the end address to the end of the physical memory covered by this cache instance. */
        endLim  = MSDK_REG_NONSECURE_ADDR(phyMemBase[instance][g_cache64MemPhyAliasId] +
                                          phyMemSize[instance][g_cache64MemPhyAliasId] - 0x01U);
        endAddr = endAddr > endLim ? endLim : endAddr;

        /* Set the invalidate by line command and use the physical address. */
        pccReg = (base->CLCR & ~CACHE64_CTRL_CLCR_LCMD_MASK) | CACHE64_CTRL_CLCR_LCMD(1) | CACHE64_CTRL_CLCR_LADSEL_MASK;
        base->CLCR = pccReg;

        while (startAddr < endAddr)
        {
            /* Set the address and initiate the command. */
            base->CSAR = (startAddr & CACHE64_CTRL_CSAR_PHYADDR_MASK) | CACHE64_CTRL_CSAR_LGO_MASK;

            /* Wait until the cache command completes. */
            while ((base->CSAR & CACHE64_CTRL_CSAR_LGO_MASK) != 0x00U)
            {
            }
            startAddr += (uint32_t)CACHE64_LINESIZE_BYTE;
        }
    }
}
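
/*
 * Usage sketch (illustrative only, not part of the driver): discard stale lines before the CPU
 * reads a buffer that another bus master (for example DMA) has just written. The rxBuffer symbol
 * and its size are hypothetical; both the address and the size should be CACHE64_LINESIZE_BYTE
 * aligned.
 *
 *   CACHE64_InvalidateCacheByRange((uint32_t)&rxBuffer[0], sizeof(rxBuffer));
 *   // rxBuffer can now be read coherently by the CPU.
 */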

/*!
 * brief Cleans the cache.
 *
 * param base CACHE64_CTRL peripheral base address.
 */
void CACHE64_CleanCache(CACHE64_CTRL_Type *base)
{
    /* Push all modified lines in both ways and initiate the cache command. */
    base->CCR |= CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK | CACHE64_CTRL_CCR_GO_MASK;

    /* Wait until the cache command completes. */
    while ((base->CCR & CACHE64_CTRL_CCR_GO_MASK) != 0x00U)
    {
    }

    /* As a precaution, clear the bits to avoid inadvertently re-running this command. */
    base->CCR &= ~(CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK);
}

/*!
 * brief Cleans the cache by range.
 *
 * param address The physical address of the cache.
 * param size_byte Size of the memory to be cleaned, should be larger than 0.
 * note Address and size should be aligned to "CACHE64_LINESIZE_BYTE".
 * The startAddr here will be forced to align to CACHE64_LINESIZE_BYTE if
 * startAddr is not aligned. For the size_byte, the application should ensure the
 * alignment or ensure the correct operation order if the size_byte is not aligned.
 */
void CACHE64_CleanCacheByRange(uint32_t address, uint32_t size_byte)
{
    if (size_byte > 0UL)
    {
        uint32_t endAddr = MSDK_REG_NONSECURE_ADDR(address + size_byte - 0x01U);
        uint32_t pccReg  = 0;
        /* Align address to cache line size. */
        uint32_t startAddr = MSDK_REG_NONSECURE_ADDR(address & ~((uint32_t)CACHE64_LINESIZE_BYTE - 1U));
        uint32_t instance  = CACHE64_GetInstanceByAddr(address);
        uint32_t endLim;
        CACHE64_CTRL_Type *base;
        uint32_t phyMemBase[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
        uint32_t phyMemSize[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
        memcpy(phyMemBase, s_cache64PhymemBases, sizeof(s_cache64PhymemBases));
        memcpy(phyMemSize, s_cache64PhymemSizes, sizeof(s_cache64PhymemSizes));

        if (instance >= ARRAY_SIZE(s_cache64ctrlBases))
        {
            return;
        }
        base    = s_cache64ctrlBases[instance];
        /* Clamp the end address to the end of the physical memory covered by this cache instance. */
        endLim  = MSDK_REG_NONSECURE_ADDR(phyMemBase[instance][g_cache64MemPhyAliasId] +
                                          phyMemSize[instance][g_cache64MemPhyAliasId] - 0x01U);
        endAddr = endAddr > endLim ? endLim : endAddr;

        /* Set the push by line command. */
        pccReg = (base->CLCR & ~CACHE64_CTRL_CLCR_LCMD_MASK) | CACHE64_CTRL_CLCR_LCMD(2) | CACHE64_CTRL_CLCR_LADSEL_MASK;
        base->CLCR = pccReg;

        while (startAddr < endAddr)
        {
            /* Set the address and initiate the command. */
            base->CSAR = (startAddr & CACHE64_CTRL_CSAR_PHYADDR_MASK) | CACHE64_CTRL_CSAR_LGO_MASK;

            /* Wait until the cache command completes. */
            while ((base->CSAR & CACHE64_CTRL_CSAR_LGO_MASK) != 0x00U)
            {
            }
            startAddr += (uint32_t)CACHE64_LINESIZE_BYTE;
        }
    }
}
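
/*
 * Usage sketch (illustrative only, not part of the driver): push modified lines to memory before
 * another bus master (for example DMA) reads the buffer. The txBuffer symbol and its size are
 * hypothetical; both the address and the size should be CACHE64_LINESIZE_BYTE aligned.
 *
 *   CACHE64_CleanCacheByRange((uint32_t)&txBuffer[0], sizeof(txBuffer));
 *   // txBuffer contents are now visible to the other bus master.
 */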

/*!
 * brief Cleans and invalidates the cache.
 *
 * param base CACHE64_CTRL peripheral base address.
 */
void CACHE64_CleanInvalidateCache(CACHE64_CTRL_Type *base)
{
    /* Push and invalidate all. */
    base->CCR |= CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK | CACHE64_CTRL_CCR_INVW0_MASK |
                 CACHE64_CTRL_CCR_INVW1_MASK | CACHE64_CTRL_CCR_GO_MASK;

    /* Wait until the cache command completes. */
    while ((base->CCR & CACHE64_CTRL_CCR_GO_MASK) != 0x00U)
    {
    }

    /* As a precaution, clear the bits to avoid inadvertently re-running this command. */
    base->CCR &= ~(CACHE64_CTRL_CCR_PUSHW0_MASK | CACHE64_CTRL_CCR_PUSHW1_MASK | CACHE64_CTRL_CCR_INVW0_MASK |
                   CACHE64_CTRL_CCR_INVW1_MASK);
}

/*!
 * brief Cleans and invalidates the cache by range.
 *
 * param address The physical address of the cache.
 * param size_byte Size of the memory to be cleaned and invalidated, should be larger than 0.
 * note Address and size should be aligned to "CACHE64_LINESIZE_BYTE".
 * The startAddr here will be forced to align to CACHE64_LINESIZE_BYTE if
 * startAddr is not aligned. For the size_byte, the application should ensure the
 * alignment or ensure the correct operation order if the size_byte is not aligned.
 */
void CACHE64_CleanInvalidateCacheByRange(uint32_t address, uint32_t size_byte)
{
    if (size_byte > 0UL)
    {
        uint32_t endAddr = MSDK_REG_NONSECURE_ADDR(address + size_byte - 0x01U);
        uint32_t pccReg  = 0;
        /* Align address to cache line size. */
        uint32_t startAddr = MSDK_REG_NONSECURE_ADDR(address & ~((uint32_t)CACHE64_LINESIZE_BYTE - 1U));
        uint32_t instance  = CACHE64_GetInstanceByAddr(address);
        uint32_t endLim;
        CACHE64_CTRL_Type *base;
        uint32_t phyMemBase[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
        uint32_t phyMemSize[FSL_FEATURE_SOC_CACHE64_CTRL_COUNT][CACHE64_PHYMEM_COLUM_COUNT];
        memcpy(phyMemBase, s_cache64PhymemBases, sizeof(s_cache64PhymemBases));
        memcpy(phyMemSize, s_cache64PhymemSizes, sizeof(s_cache64PhymemSizes));

        if (instance >= ARRAY_SIZE(s_cache64ctrlBases))
        {
            return;
        }
        base    = s_cache64ctrlBases[instance];
        /* Clamp the end address to the end of the physical memory covered by this cache instance. */
        endLim  = MSDK_REG_NONSECURE_ADDR(phyMemBase[instance][g_cache64MemPhyAliasId] +
                                          phyMemSize[instance][g_cache64MemPhyAliasId] - 0x01U);
        endAddr = endAddr > endLim ? endLim : endAddr;

        /* Set the clean and invalidate by line command. */
        pccReg = (base->CLCR & ~CACHE64_CTRL_CLCR_LCMD_MASK) | CACHE64_CTRL_CLCR_LCMD(3) | CACHE64_CTRL_CLCR_LADSEL_MASK;
        base->CLCR = pccReg;

        while (startAddr < endAddr)
        {
            /* Set the address and initiate the command. */
            base->CSAR = (startAddr & CACHE64_CTRL_CSAR_PHYADDR_MASK) | CACHE64_CTRL_CSAR_LGO_MASK;

            /* Wait until the cache command completes. */
            while ((base->CSAR & CACHE64_CTRL_CSAR_LGO_MASK) != 0x00U)
            {
            }
            startAddr += (uint32_t)CACHE64_LINESIZE_BYTE;
        }
    }
}

#if !(defined(FSL_FEATURE_CACHE64_CTRL_HAS_NO_WRITE_BUF) && FSL_FEATURE_CACHE64_CTRL_HAS_NO_WRITE_BUF)
/*!
 * brief Enables or disables the cache write buffer.
 *
 * param base CACHE64_CTRL peripheral base address.
 * param enable True to enable the write buffer, false to disable it.
 */
void CACHE64_EnableWriteBuffer(CACHE64_CTRL_Type *base, bool enable)
{
    if (enable)
    {
        base->CCR |= CACHE64_CTRL_CCR_ENWRBUF_MASK;
    }
    else
    {
        base->CCR &= ~CACHE64_CTRL_CCR_ENWRBUF_MASK;
    }
}

#endif

#endif /* FSL_FEATURE_SOC_CACHE64_CTRL_COUNT > 0 */