/*
 * Copyright 2016-2021 NXP
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "fsl_cache.h"

/*******************************************************************************
 * Definitions
 ******************************************************************************/

/* Component ID definition, used by tools. */
#ifndef FSL_COMPONENT_ID
#define FSL_COMPONENT_ID "platform.drivers.cache_armv7_m7"
#endif

#if defined(FSL_FEATURE_SOC_L2CACHEC_COUNT) && FSL_FEATURE_SOC_L2CACHEC_COUNT
#define L2CACHE_OPERATION_TIMEOUT 0xFFFFFU
#define L2CACHE_8WAYS_MASK 0xFFU
#define L2CACHE_16WAYS_MASK 0xFFFFU
#define L2CACHE_SMALLWAYS_NUM 8U
#define L2CACHE_1KBCOVERTOB 1024U
#define L2CACHE_SAMLLWAYS_SIZE 16U
#define L2CACHE_LOCKDOWN_REGNUM 8 /*!< Lock down register numbers. */
/*******************************************************************************
 * Prototypes
 ******************************************************************************/
/*!
 * @brief Sets the operation for all ways and waits for the operation to finish.
 * This is provided for all the background operations.
 *
 * @param auxCtlReg The auxiliary control register value.
 * @param regAddr The address of the register to be operated on.
 */
static void L2CACHE_SetAndWaitBackGroundOperate(uint32_t auxCtlReg, uint32_t regAddr);

/*!
 * @brief Invalidates the Level 2 cache line by physical address.
 * This function invalidates a cache line by physical address.
 *
 * @param address The physical address of the cache line.
 *        The format of the address shall be:
 *        bit 31 ~ bit n+1 | bit n ~ bit 5 | bit 4 ~ bit 0
 *              Tag        |     index     |       0
 *        Note: the physical address shall be aligned to the line size - 32B (256 bit),
 *        so keep the last 5 bits (bit 4 ~ bit 0) of the physical address zero.
 *        If the input address is not aligned, it is forced to a 32-byte aligned address.
 *        The value of n varies with the index width.
 * @return The actual 32-byte aligned physical address that was operated on.
 */
static uint32_t L2CACHE_InvalidateLineByAddr(uint32_t address);

/*!
 * @brief Cleans the Level 2 cache line based on the physical address.
 * This function cleans a cache line based on a physical address.
 *
 * @param address The physical address of the cache line.
 *        The format of the address shall be:
 *        bit 31 ~ bit n+1 | bit n ~ bit 5 | bit 4 ~ bit 0
 *              Tag        |     index     |       0
 *        Note: the physical address shall be aligned to the line size - 32B (256 bit),
 *        so keep the last 5 bits (bit 4 ~ bit 0) of the physical address zero.
 *        If the input address is not aligned, it is forced to a 32-byte aligned address.
 *        The value of n varies with the index width.
 * @return The actual 32-byte aligned physical address that was operated on.
 */
static uint32_t L2CACHE_CleanLineByAddr(uint32_t address);

/*!
 * @brief Cleans and invalidates the Level 2 cache line based on the physical address.
 * This function cleans and invalidates a cache line based on a physical address.
 *
 * @param address The physical address of the cache line.
 *        The format of the address shall be:
 *        bit 31 ~ bit n+1 | bit n ~ bit 5 | bit 4 ~ bit 0
 *              Tag        |     index     |       0
 *        Note: the physical address shall be aligned to the line size - 32B (256 bit),
 *        so keep the last 5 bits (bit 4 ~ bit 0) of the physical address zero.
 *        If the input address is not aligned, it is forced to a 32-byte aligned address.
 *        The value of n varies with the index width.
 * @return The actual 32-byte aligned physical address that was operated on.
 */
static uint32_t L2CACHE_CleanInvalidateLineByAddr(uint32_t address);

/*!
 * @brief Gets the number of Level 2 cache ways and the way size.
 * This function reads the way count and the way size from the auxiliary control register.
 *
 * @param num_ways The number of cache ways.
 * @param size_way The way size in bytes.
 */
static void L2CACHE_GetWayNumSize(uint32_t *num_ways, uint32_t *size_way);
/*******************************************************************************
 * Code
 ******************************************************************************/
static void L2CACHE_SetAndWaitBackGroundOperate(uint32_t auxCtlReg, uint32_t regAddr)
{
    uint16_t mask    = L2CACHE_8WAYS_MASK;
    uint32_t timeout = L2CACHE_OPERATION_TIMEOUT;

    /* Check the ways used first. */
    if (auxCtlReg & L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_MASK)
    {
        mask = L2CACHE_16WAYS_MASK;
    }

    /* Set the operation for all ways/entries of the cache. */
    *(uint32_t *)regAddr = mask;
    /* Wait until the operation is complete. */
    while ((*(volatile uint32_t *)regAddr & mask) && timeout)
    {
        __ASM("nop");
        timeout--;
    }
}

static uint32_t L2CACHE_InvalidateLineByAddr(uint32_t address)
{
    /* Align the address first. */
    address &= ~(uint32_t)(FSL_FEATURE_L2CACHE_LINESIZE_BYTE - 1);
    /* Invalidate the cache line by physical address. */
    L2CACHEC->REG7_INV_PA = address;

    return address;
}

static uint32_t L2CACHE_CleanLineByAddr(uint32_t address)
{
    /* Align the address first. */
    address &= ~(uint32_t)(FSL_FEATURE_L2CACHE_LINESIZE_BYTE - 1);
    /* Clean the cache line by physical address. */
    L2CACHEC->REG7_CLEAN_PA = address;

    return address;
}

static uint32_t L2CACHE_CleanInvalidateLineByAddr(uint32_t address)
{
    /* Align the address first. */
    address &= ~(uint32_t)(FSL_FEATURE_L2CACHE_LINESIZE_BYTE - 1);
    /* Clean and invalidate the cache line by physical address. */
    L2CACHEC->REG7_CLEAN_INV_PA = address;

    return address;
}

static void L2CACHE_GetWayNumSize(uint32_t *num_ways, uint32_t *size_way)
{
    assert(num_ways);
    assert(size_way);

    uint32_t number = (L2CACHEC->REG1_AUX_CONTROL & L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_MASK) >>
                      L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_SHIFT;
    uint32_t size = (L2CACHEC->REG1_AUX_CONTROL & L2CACHEC_REG1_AUX_CONTROL_WAYSIZE_MASK) >>
                    L2CACHEC_REG1_AUX_CONTROL_WAYSIZE_SHIFT;

    *num_ways = (number + 1) * L2CACHE_SMALLWAYS_NUM;
    if (!size)
    {
        /* A value of 0 is internally mapped to the same size as 1 - 16KB. */
        size += 1;
    }
    *size_way = (1 << (size - 1)) * L2CACHE_SAMLLWAYS_SIZE * L2CACHE_1KBCOVERTOB;
}
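
/*
 * Worked example of the mapping above (a sketch derived only from the
 * arithmetic in L2CACHE_GetWayNumSize, not an additional hardware guarantee):
 * with an associativity field of 1 (16 ways) and a way-size field of 2,
 * *num_ways = (1 + 1) * 8 = 16 and
 * *size_way = (1 << (2 - 1)) * 16 * 1024 = 32768 bytes (32 KB per way),
 * for a total cache size of 16 * 32 KB = 512 KB.
 */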

/*!
 * brief Initializes the level 2 cache controller module.
 *
 * param config Pointer to configuration structure. See "l2cache_config_t".
 */
void L2CACHE_Init(l2cache_config_t *config)
{
    assert(config);

    uint16_t waysNum = 0xFFU; /* Default to the 8-way mask. */
    uint8_t count;
    uint32_t auxReg = 0;

    /* The aux control register must be configured while the cache controller is disabled,
     * so disable it first if the cache controller is enabled.
     */
    if (L2CACHEC->REG1_CONTROL & L2CACHEC_REG1_CONTROL_CE_MASK)
    {
        L2CACHE_Disable();
    }

    /* Unlock all entries. */
    if (L2CACHEC->REG1_AUX_CONTROL & L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_MASK)
    {
        waysNum = 0xFFFFU;
    }

    for (count = 0; count < L2CACHE_LOCKDOWN_REGNUM; count++)
    {
        L2CACHE_LockdownByWayEnable(count, waysNum, false);
    }

    /* Set the ways, way-size, etc. */
    auxReg = L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY(config->wayNum) |
             L2CACHEC_REG1_AUX_CONTROL_WAYSIZE(config->waySize) | L2CACHEC_REG1_AUX_CONTROL_CRP(config->repacePolicy) |
             L2CACHEC_REG1_AUX_CONTROL_IPE(config->istrPrefetchEnable) |
             L2CACHEC_REG1_AUX_CONTROL_DPE(config->dataPrefetchEnable) |
             L2CACHEC_REG1_AUX_CONTROL_NLE(config->nsLockdownEnable) |
             L2CACHEC_REG1_AUX_CONTROL_FWA(config->writeAlloc) | L2CACHEC_REG1_AUX_CONTROL_HPSDRE(config->writeAlloc);
    L2CACHEC->REG1_AUX_CONTROL = auxReg;

    /* Set the tag/data RAM latency. */
    if (config->lateConfig)
    {
        uint32_t data = 0;
        /* Tag latency. */
        data = L2CACHEC_REG1_TAG_RAM_CONTROL_SL(config->lateConfig->tagSetupLate) |
               L2CACHEC_REG1_TAG_RAM_CONTROL_RAL(config->lateConfig->tagReadLate) |
               L2CACHEC_REG1_TAG_RAM_CONTROL_WAL(config->lateConfig->tagWriteLate);
        L2CACHEC->REG1_TAG_RAM_CONTROL = data;
        /* Data latency. */
        data = L2CACHEC_REG1_DATA_RAM_CONTROL_SL(config->lateConfig->dataSetupLate) |
               L2CACHEC_REG1_DATA_RAM_CONTROL_RAL(config->lateConfig->dataReadLate) |
               L2CACHEC_REG1_DATA_RAM_CONTROL_WAL(config->lateConfig->dataWriteLate);
        L2CACHEC->REG1_DATA_RAM_CONTROL = data;
    }
}
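
/*
 * Usage sketch (illustrative, not part of the driver): a typical bring-up
 * sequence reads back the defaults, adjusts the fields of interest, then
 * initializes and enables the controller. The prefetch tweak below is an
 * application-specific example, not a requirement.
 *
 *   l2cache_config_t l2Config;
 *   L2CACHE_GetDefaultConfig(&l2Config);
 *   l2Config.dataPrefetchEnable = true;
 *   L2CACHE_Init(&l2Config);
 *   L2CACHE_Enable();
 */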

/*!
 * brief Gets the default settings for the cache controller.
 *
 * This function initializes the cache controller configuration structure with default settings.
 * The default values are:
 * code
 * config->wayNum = kL2CACHE_8ways;
 * config->waySize = kL2CACHE_32KbSize;
 * config->repacePolicy = kL2CACHE_Roundrobin;
 * config->lateConfig = NULL;
 * config->istrPrefetchEnable = false;
 * config->dataPrefetchEnable = false;
 * config->nsLockdownEnable = false;
 * config->writeAlloc = kL2CACHE_UseAwcache;
 * endcode
 * param config Pointer to the configuration structure.
 */
void L2CACHE_GetDefaultConfig(l2cache_config_t *config)
{
    assert(config);

    /* Initializes the configuration structure to zero. */
    memset(config, 0, sizeof(*config));

    uint32_t number = (L2CACHEC->REG1_AUX_CONTROL & L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_MASK) >>
                      L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_SHIFT;
    uint32_t size = (L2CACHEC->REG1_AUX_CONTROL & L2CACHEC_REG1_AUX_CONTROL_WAYSIZE_MASK) >>
                    L2CACHEC_REG1_AUX_CONTROL_WAYSIZE_SHIFT;

    /* Get the default values from the hardware. */
    config->wayNum = (l2cache_way_num_t)number;
    config->waySize = (l2cache_way_size_t)size;
    config->repacePolicy = kL2CACHE_Roundrobin;
    config->lateConfig = NULL;
    config->istrPrefetchEnable = false;
    config->dataPrefetchEnable = false;
    config->nsLockdownEnable = false;
    config->writeAlloc = kL2CACHE_UseAwcache;
}

/*!
 * brief Enables the level 2 cache controller.
 * This function enables the cache controller. The control register must be written
 * using a secure access; a non-secure write causes a DECERR response.
 *
 */
void L2CACHE_Enable(void)
{
    /* Invalidate first. */
    L2CACHE_Invalidate();
    /* Enable the level 2 cache controller. */
    L2CACHEC->REG1_CONTROL = L2CACHEC_REG1_CONTROL_CE_MASK;
}

/*!
 * brief Disables the level 2 cache controller.
 * This function disables the cache controller. The control register must be written
 * using a secure access; a non-secure write causes a DECERR response.
 *
 */
void L2CACHE_Disable(void)
{
    /* First clean and invalidate all entries in the cache. */
    L2CACHE_CleanInvalidate();
    /* Disable the level 2 cache controller. */
    L2CACHEC->REG1_CONTROL &= ~L2CACHEC_REG1_CONTROL_CE_MASK;
    /* DSB - data sync barrier. */
    __DSB();
}

/*!
 * brief Invalidates the Level 2 cache.
 * This function invalidates all entries in the cache.
 *
 */
void L2CACHE_Invalidate(void)
{
    /* Invalidate all entries in the cache. */
    L2CACHE_SetAndWaitBackGroundOperate(L2CACHEC->REG1_AUX_CONTROL, (uint32_t)&L2CACHEC->REG7_INV_WAY);
    /* Cache sync. */
    L2CACHEC->REG7_CACHE_SYNC = 0;
}

/*!
 * brief Cleans the level 2 cache controller.
 * This function cleans all entries in the level 2 cache controller.
 *
 */
void L2CACHE_Clean(void)
{
    /* Clean all entries of the cache. */
    L2CACHE_SetAndWaitBackGroundOperate(L2CACHEC->REG1_AUX_CONTROL, (uint32_t)&L2CACHEC->REG7_CLEAN_WAY);
    /* Cache sync. */
    L2CACHEC->REG7_CACHE_SYNC = 0;
}

/*!
 * brief Cleans and invalidates the level 2 cache controller.
 * This function cleans and invalidates all entries in the level 2 cache controller.
 *
 */
void L2CACHE_CleanInvalidate(void)
{
    /* Clean and invalidate all entries of the cache. */
    L2CACHE_SetAndWaitBackGroundOperate(L2CACHEC->REG1_AUX_CONTROL, (uint32_t)&L2CACHEC->REG7_CLEAN_INV_WAY);
    /* Cache sync. */
    L2CACHEC->REG7_CACHE_SYNC = 0;
}

/*!
 * brief Invalidates the Level 2 cache lines in the range of two physical addresses.
 * This function invalidates all cache lines between two physical addresses.
 *
 * param address The start address of the memory to be invalidated.
 * param size_byte The memory size.
 * note The start address and size_byte should be 32-byte (FSL_FEATURE_L2CACHE_LINESIZE_BYTE) aligned.
 * The address is forced to align to the L2 cache line size if it is not aligned.
 * For size_byte, the application should ensure the alignment or ensure the correct
 * operation order if it is not aligned.
 */
void L2CACHE_InvalidateByRange(uint32_t address, uint32_t size_byte)
{
    uint32_t endAddr = address + size_byte;

    /* Invalidate addresses in the range. */
    while (address < endAddr)
    {
        address = L2CACHE_InvalidateLineByAddr(address);
        /* Move to the next cache line. */
        address += FSL_FEATURE_L2CACHE_LINESIZE_BYTE;
    }

    /* Cache sync. */
    L2CACHEC->REG7_CACHE_SYNC = 0;
}

/*!
 * brief Cleans the Level 2 cache lines in the range of two physical addresses.
 * This function cleans all cache lines between two physical addresses.
 *
 * param address The start address of the memory to be cleaned.
 * param size_byte The memory size.
 * note The start address and size_byte should be 32-byte (FSL_FEATURE_L2CACHE_LINESIZE_BYTE) aligned.
 * The address is forced to align to the L2 cache line size if it is not aligned.
 * For size_byte, the application should ensure the alignment or ensure the correct
 * operation order if it is not aligned.
 */
void L2CACHE_CleanByRange(uint32_t address, uint32_t size_byte)
{
    uint32_t num_ways = 0;
    uint32_t size_way = 0;
    uint32_t endAddr = address + size_byte;

    /* Get the number and size of the cache way. */
    L2CACHE_GetWayNumSize(&num_ways, &size_way);

    /* Check if the clean size is over the cache size. */
    if ((endAddr - address) > num_ways * size_way)
    {
        L2CACHE_Clean();
        return;
    }

    /* Clean addresses in the range. */
    while ((address & ~(uint32_t)(FSL_FEATURE_L2CACHE_LINESIZE_BYTE - 1)) < endAddr)
    {
        /* Clean the address in the range. */
        address = L2CACHE_CleanLineByAddr(address);
        address += FSL_FEATURE_L2CACHE_LINESIZE_BYTE;
    }

    L2CACHEC->REG7_CACHE_SYNC = 0;
}

/*!
 * brief Cleans and invalidates the Level 2 cache lines in the range of two physical addresses.
 * This function cleans and invalidates all cache lines between two physical addresses.
 *
 * param address The start address of the memory to be cleaned and invalidated.
 * param size_byte The memory size.
 * note The start address and size_byte should be 32-byte (FSL_FEATURE_L2CACHE_LINESIZE_BYTE) aligned.
 * The address is forced to align to the L2 cache line size if it is not aligned.
 * For size_byte, the application should ensure the alignment or ensure the correct
 * operation order if it is not aligned.
 */
void L2CACHE_CleanInvalidateByRange(uint32_t address, uint32_t size_byte)
{
    uint32_t num_ways = 0;
    uint32_t size_way = 0;
    uint32_t endAddr = address + size_byte;

    /* Get the number and size of the cache way. */
    L2CACHE_GetWayNumSize(&num_ways, &size_way);

    /* Check if the clean and invalidate size is over the cache size. */
    if ((endAddr - address) > num_ways * size_way)
    {
        L2CACHE_CleanInvalidate();
        return;
    }

    /* Clean and invalidate addresses in the range. */
    while ((address & ~(uint32_t)(FSL_FEATURE_L2CACHE_LINESIZE_BYTE - 1)) < endAddr)
    {
        /* Clean and invalidate the address in the range. */
        address = L2CACHE_CleanInvalidateLineByAddr(address);
        address += FSL_FEATURE_L2CACHE_LINESIZE_BYTE;
    }

    L2CACHEC->REG7_CACHE_SYNC = 0;
}

/*!
 * brief Enables or disables the lockdown of data and instructions by way.
 * This function locks down cached instructions/data by way, preventing the addresses from
 * being allocated and preventing data from being evicted out of the level 2 cache.
 * However, the normal cache maintenance operations that invalidate, clean, or clean
 * and invalidate cache contents affect the locked-down cache lines as normal.
 *
 * param masterId The master id, in the range 0 ~ 7.
 * param mask The ways to be enabled or disabled for lockdown.
 * Each bit in the value is related to one way of the cache, for example:
 * value: bit 0 ------ way 0.
 * value: bit 1 ------ way 1.
 * --------------------------
 * value: bit 15 ------ way 15.
 * Note: make sure the value is consistent with the number of supported ways.
 * param enable True to enable the lockdown, false to disable the lockdown.
 */
void L2CACHE_LockdownByWayEnable(uint32_t masterId, uint32_t mask, bool enable)
{
    uint8_t num_ways = (L2CACHEC->REG1_AUX_CONTROL & L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_MASK) >>
                       L2CACHEC_REG1_AUX_CONTROL_ASSOCIATIVITY_SHIFT;
    num_ways = (num_ways + 1) * L2CACHE_SMALLWAYS_NUM;

    assert(mask < (1U << num_ways));
    assert(masterId < L2CACHE_LOCKDOWN_REGNUM);

    uint32_t dataReg = L2CACHEC->LOCKDOWN[masterId].REG9_D_LOCKDOWN;
    uint32_t istrReg = L2CACHEC->LOCKDOWN[masterId].REG9_I_LOCKDOWN;

    if (enable)
    {
        /* Data lockdown. */
        L2CACHEC->LOCKDOWN[masterId].REG9_D_LOCKDOWN = dataReg | mask;
        /* Instruction lockdown. */
        L2CACHEC->LOCKDOWN[masterId].REG9_I_LOCKDOWN = istrReg | mask;
    }
    else
    {
        /* Data lockdown. */
        L2CACHEC->LOCKDOWN[masterId].REG9_D_LOCKDOWN = dataReg & ~mask;
        /* Instruction lockdown. */
        L2CACHEC->LOCKDOWN[masterId].REG9_I_LOCKDOWN = istrReg & ~mask;
    }
}
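
/*
 * Usage sketch (illustrative, not part of the driver): lock down way 0 for bus
 * master 0 so that new allocations avoid that way, e.g. to pin data that was
 * preloaded into it. The chosen master ID and mask are examples only.
 *
 *   L2CACHE_LockdownByWayEnable(0U, 0x1U, true);  // lock way 0 for master 0
 *   ...
 *   L2CACHE_LockdownByWayEnable(0U, 0x1U, false); // unlock it again
 */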
#endif /* FSL_FEATURE_SOC_L2CACHEC_COUNT */

/*!
 * brief Invalidates the Cortex-M7 L1 instruction cache by range.
 *
 * param address The start address of the memory to be invalidated.
 * param size_byte The memory size.
 * note The start address and size_byte should be 32-byte (FSL_FEATURE_L1ICACHE_LINESIZE_BYTE) aligned.
 * The address is forced to align to the L1 I-cache line size if it is not aligned.
 * For size_byte, the application should ensure the alignment or ensure the correct
 * operation order if it is not aligned.
 */
void L1CACHE_InvalidateICacheByRange(uint32_t address, uint32_t size_byte)
{
#if (__DCACHE_PRESENT == 1U)
    uint32_t addr = address & ~((uint32_t)FSL_FEATURE_L1ICACHE_LINESIZE_BYTE - 1U);
    uint32_t align_len = address - addr;
    int32_t size = (int32_t)size_byte + (int32_t)align_len;

    __DSB();
    while (size > 0)
    {
        SCB->ICIMVAU = addr;
        addr += (uint32_t)FSL_FEATURE_L1ICACHE_LINESIZE_BYTE;
        size -= (int32_t)FSL_FEATURE_L1ICACHE_LINESIZE_BYTE;
    }
    __DSB();
    __ISB();
#endif
}
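
/*
 * Usage sketch (illustrative, not part of the driver): after copying executable
 * code into a cacheable RAM region (for example a RAM function or an overlay),
 * clean the data cache for that range and invalidate the instruction cache
 * before branching to it. The buffer name and size below are placeholders.
 *
 *   DCACHE_CleanByRange((uint32_t)ramCodeBuffer, ramCodeSize);
 *   ICACHE_InvalidateByRange((uint32_t)ramCodeBuffer, ramCodeSize);
 */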

/*!
 * brief Invalidates all instruction caches by range.
 *
 * Both the Cortex-M7 L1 cache line and the L2 PL310 cache line are 32 bytes long.
 *
 * param address The physical address.
 * param size_byte Size of the memory to be invalidated.
 * note The address and size should be aligned to the 32-byte cache line size, because the
 * cache operation unit is one cache line. The address is forced to align to the cache
 * line size if it is not aligned. For size_byte, the application should ensure the
 * alignment or ensure the correct operation order if it is not aligned.
 */
void ICACHE_InvalidateByRange(uint32_t address, uint32_t size_byte)
{
#if defined(FSL_FEATURE_SOC_L2CACHEC_COUNT) && FSL_FEATURE_SOC_L2CACHEC_COUNT
#if defined(FSL_SDK_DISBLE_L2CACHE_PRESENT) && !FSL_SDK_DISBLE_L2CACHE_PRESENT
    L2CACHE_InvalidateByRange(address, size_byte);
#endif /* !FSL_SDK_DISBLE_L2CACHE_PRESENT */
#endif /* FSL_FEATURE_SOC_L2CACHEC_COUNT */

    L1CACHE_InvalidateICacheByRange(address, size_byte);
}

/*!
 * brief Invalidates all data caches by range.
 *
 * Both the Cortex-M7 L1 cache line and the L2 PL310 cache line are 32 bytes long.
 *
 * param address The physical address.
 * param size_byte Size of the memory to be invalidated.
 * note The address and size should be aligned to the 32-byte cache line size, because the
 * cache operation unit is one cache line. The address is forced to align to the cache
 * line size if it is not aligned. For size_byte, the application should ensure the
 * alignment or ensure the correct operation order if it is not aligned.
 */
void DCACHE_InvalidateByRange(uint32_t address, uint32_t size_byte)
{
#if defined(FSL_FEATURE_SOC_L2CACHEC_COUNT) && FSL_FEATURE_SOC_L2CACHEC_COUNT
#if defined(FSL_SDK_DISBLE_L2CACHE_PRESENT) && !FSL_SDK_DISBLE_L2CACHE_PRESENT
    L2CACHE_InvalidateByRange(address, size_byte);
#endif /* !FSL_SDK_DISBLE_L2CACHE_PRESENT */
#endif /* FSL_FEATURE_SOC_L2CACHEC_COUNT */
    L1CACHE_InvalidateDCacheByRange(address, size_byte);
}
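
/*
 * Usage sketch (illustrative, not part of the driver): after a DMA peripheral
 * has written into a cacheable receive buffer, invalidate that range before the
 * CPU reads it so that stale cache lines are not returned. The buffer name is a
 * placeholder.
 *
 *   DCACHE_InvalidateByRange((uint32_t)rxBuffer, sizeof(rxBuffer));
 */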

/*!
 * brief Cleans all data caches by range.
 *
 * Both the Cortex-M7 L1 cache line and the L2 PL310 cache line are 32 bytes long.
 *
 * param address The physical address.
 * param size_byte Size of the memory to be cleaned.
 * note The address and size should be aligned to the 32-byte cache line size, because the
 * cache operation unit is one cache line. The address is forced to align to the cache
 * line size if it is not aligned. For size_byte, the application should ensure the
 * alignment or ensure the correct operation order if it is not aligned.
 */
void DCACHE_CleanByRange(uint32_t address, uint32_t size_byte)
{
    L1CACHE_CleanDCacheByRange(address, size_byte);
#if defined(FSL_FEATURE_SOC_L2CACHEC_COUNT) && FSL_FEATURE_SOC_L2CACHEC_COUNT
#if defined(FSL_SDK_DISBLE_L2CACHE_PRESENT) && !FSL_SDK_DISBLE_L2CACHE_PRESENT
    L2CACHE_CleanByRange(address, size_byte);
#endif /* !FSL_SDK_DISBLE_L2CACHE_PRESENT */
#endif /* FSL_FEATURE_SOC_L2CACHEC_COUNT */
}
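
/*
 * Usage sketch (illustrative, not part of the driver): before starting a DMA
 * transfer that reads from a cacheable transmit buffer, clean that range so the
 * data the CPU wrote reaches main memory. The buffer name is a placeholder.
 *
 *   DCACHE_CleanByRange((uint32_t)txBuffer, sizeof(txBuffer));
 */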

/*!
 * brief Cleans and invalidates all data caches by range.
 *
 * Both the Cortex-M7 L1 cache line and the L2 PL310 cache line are 32 bytes long.
 *
 * param address The physical address.
 * param size_byte Size of the memory to be cleaned and invalidated.
 * note The address and size should be aligned to the 32-byte cache line size, because the
 * cache operation unit is one cache line. The address is forced to align to the cache
 * line size if it is not aligned. For size_byte, the application should ensure the
 * alignment or ensure the correct operation order if it is not aligned.
 */
void DCACHE_CleanInvalidateByRange(uint32_t address, uint32_t size_byte)
{
    L1CACHE_CleanInvalidateDCacheByRange(address, size_byte);
#if defined(FSL_FEATURE_SOC_L2CACHEC_COUNT) && FSL_FEATURE_SOC_L2CACHEC_COUNT
#if defined(FSL_SDK_DISBLE_L2CACHE_PRESENT) && !FSL_SDK_DISBLE_L2CACHE_PRESENT
    L2CACHE_CleanInvalidateByRange(address, size_byte);
#endif /* !FSL_SDK_DISBLE_L2CACHE_PRESENT */
#endif /* FSL_FEATURE_SOC_L2CACHEC_COUNT */
}