1 /*
2  * Copyright 2023-2024 NXP
3  * All rights reserved.
4  *
5  * SPDX-License-Identifier: BSD-3-Clause
6  */
7 
8 #include "fsl_xspi.h"
9 
10 /* Component ID definition, used by tools. */
11 #ifndef FSL_COMPONENT_ID
12 #define FSL_COMPONENT_ID "platform.drivers.xspi"
13 #endif
14 
15 /*******************************************************************************
16  * Definitions
17  ******************************************************************************/
18 
19 #define FREQ_1MHz        (1000000UL)
20 #define XSPI_LUT_KEY_VAL (0x5AF05AF0UL)
21 
22 /*! brief Common sets of flags used by the driver, _xspi_flag_constants. */
23 enum kXSPI_Flags
24 {
25     /*! IRQ sources enabled by the non-blocking transactional API. */
26     kIrqFlags = kXSPI_TxBufferFillFlag | kXSPI_TxBufferUnderrunFlag | kXSPI_RxBufferOverflowFlag |
27                 kXSPI_RxBufferDrainFlag | kXSPI_IllegalInstructionErrorFlag | kXSPI_IpCmdtriggerErrorFlag,
28 
29     /*! IP CMD Errors to check for. */
30     kIpcmdErrorFlags = kXSPI_IllegalInstructionErrorFlag | kXSPI_IpCmdtriggerErrorFlag,
31 
32     kFlashProtectionErrorFlags = kXSPI_FradMatchErrorFlag | kXSPI_FradnAccErrorFlag | kXSPI_IpsErrorFlag |
33                                  kXSPI_Tg0SfarErrorFlag | kXSPI_Tg1SfarErrorFlag | kXSPI_TgnIpcrErrorFlag,
34 };
35 
36 /* XSPI FSM status. */
37 enum
38 {
39     kXSPI_TransactionIsGrantedXSPIsBusy = 0x0U, /*!< Transaction is granted, but XSPI is busy because a
40                                                   previous DMA transaction is still ongoing. */
41     kXSPI_TbdrLockIsOpen           = 0x1U,      /*!< TBDR lock is open. IPS master can write in TBDR.*/
42     kXSPI_WriteTransferIsTriggered = 0x2U,      /*!< Write transfer is triggered. SEQID is written to XSPI.*/
43     kXSPI_ReadTransferIsTriggered  = 0x3U,      /*!< Read transfer is triggered. SEQID is written to XSPI.*/
44 };
45 
46 /* XSPI transfer state, _xspi_transfer_state. */
47 enum
48 {
49     kXSPI_Idle      = 0x0U, /*!< Transfer is done. */
50     kXSPI_BusyWrite = 0x1U, /*!< XSPI is busy with a write transfer. */
51     kXSPI_BusyRead  = 0x2U, /*!< XSPI is busy with a read transfer. */
52 };
53 
54 /*! brief Typedef for interrupt handler. */
55 typedef void (*xspi_isr_t)(XSPI_Type *base, xspi_handle_t *handle);
56 
57 #define XSPI_TG_MDAD_REG_OFFSET_ARRAY                                                   \
58     {                                                                                   \
59         (uint32_t) offsetof(XSPI_Type, TG0MDAD), (uint32_t)offsetof(XSPI_Type, TG1MDAD) \
60     }
61 
62 #define XSPI_FRAD_WORD0_REG_OFFSET_ARRAY                                                            \
63     {                                                                                               \
64         (uint32_t) offsetof(XSPI_Type, FRAD0_WORD0), (uint32_t)offsetof(XSPI_Type, FRAD1_WORD0),    \
65             (uint32_t)offsetof(XSPI_Type, FRAD2_WORD0), (uint32_t)offsetof(XSPI_Type, FRAD3_WORD0), \
66             (uint32_t)offsetof(XSPI_Type, FRAD4_WORD0), (uint32_t)offsetof(XSPI_Type, FRAD5_WORD0), \
67             (uint32_t)offsetof(XSPI_Type, FRAD6_WORD0), (uint32_t)offsetof(XSPI_Type, FRAD7_WORD0), \
68     }
69 
70 #define XSPI_FRAD_WORD1_REG_OFFSET_ARRAY                                                            \
71     {                                                                                               \
72         (uint32_t) offsetof(XSPI_Type, FRAD0_WORD1), (uint32_t)offsetof(XSPI_Type, FRAD1_WORD1),    \
73             (uint32_t)offsetof(XSPI_Type, FRAD2_WORD1), (uint32_t)offsetof(XSPI_Type, FRAD3_WORD1), \
74             (uint32_t)offsetof(XSPI_Type, FRAD4_WORD1), (uint32_t)offsetof(XSPI_Type, FRAD5_WORD1), \
75             (uint32_t)offsetof(XSPI_Type, FRAD6_WORD1), (uint32_t)offsetof(XSPI_Type, FRAD7_WORD1), \
76     }
77 
78 #define XSPI_FRAD_WORD2_REG_OFFSET_ARRAY                                                            \
79     {                                                                                               \
80         (uint32_t) offsetof(XSPI_Type, FRAD0_WORD2), (uint32_t)offsetof(XSPI_Type, FRAD1_WORD2),    \
81             (uint32_t)offsetof(XSPI_Type, FRAD2_WORD2), (uint32_t)offsetof(XSPI_Type, FRAD3_WORD2), \
82             (uint32_t)offsetof(XSPI_Type, FRAD4_WORD2), (uint32_t)offsetof(XSPI_Type, FRAD5_WORD2), \
83             (uint32_t)offsetof(XSPI_Type, FRAD6_WORD2), (uint32_t)offsetof(XSPI_Type, FRAD7_WORD2), \
84     }
85 
86 #define XSPI_FRAD_WORD3_REG_OFFSET_ARRAY                                                            \
87     {                                                                                               \
88         (uint32_t) offsetof(XSPI_Type, FRAD0_WORD3), (uint32_t)offsetof(XSPI_Type, FRAD1_WORD3),    \
89             (uint32_t)offsetof(XSPI_Type, FRAD2_WORD3), (uint32_t)offsetof(XSPI_Type, FRAD3_WORD3), \
90             (uint32_t)offsetof(XSPI_Type, FRAD4_WORD3), (uint32_t)offsetof(XSPI_Type, FRAD5_WORD3), \
91             (uint32_t)offsetof(XSPI_Type, FRAD6_WORD3), (uint32_t)offsetof(XSPI_Type, FRAD7_WORD3), \
92     }
93 
94 #define XSPI_FRAD_WORD4_REG_OFFSET_ARRAY                                                            \
95     {                                                                                               \
96         (uint32_t) offsetof(XSPI_Type, FRAD0_WORD4), (uint32_t)offsetof(XSPI_Type, FRAD1_WORD4),    \
97             (uint32_t)offsetof(XSPI_Type, FRAD2_WORD4), (uint32_t)offsetof(XSPI_Type, FRAD3_WORD4), \
98             (uint32_t)offsetof(XSPI_Type, FRAD4_WORD4), (uint32_t)offsetof(XSPI_Type, FRAD5_WORD4), \
99             (uint32_t)offsetof(XSPI_Type, FRAD6_WORD4), (uint32_t)offsetof(XSPI_Type, FRAD7_WORD4), \
100     }
101 
102 #define XSPI_FRAD_WORD5_REG_OFFSET_ARRAY                                                            \
103     {                                                                                               \
104         (uint32_t) offsetof(XSPI_Type, FRAD0_WORD5), (uint32_t)offsetof(XSPI_Type, FRAD1_WORD5),    \
105             (uint32_t)offsetof(XSPI_Type, FRAD2_WORD5), (uint32_t)offsetof(XSPI_Type, FRAD3_WORD5), \
106             (uint32_t)offsetof(XSPI_Type, FRAD4_WORD5), (uint32_t)offsetof(XSPI_Type, FRAD5_WORD5), \
107             (uint32_t)offsetof(XSPI_Type, FRAD6_WORD5), (uint32_t)offsetof(XSPI_Type, FRAD7_WORD5), \
108     }
109 
110 #define XSPI_TGSFAR_REG_OFFSET                                                              \
111     {                                                                                       \
112         offsetof(XSPI_Type, TGSFAR), offsetof(XSPI_Type, SUB_REG_MDAM_ARRAY[0].TGSFAR_SUB), \
113     }
114 
115 #define XSPI_TGSFARS_REG_OFFSET                                                               \
116     {                                                                                         \
117         offsetof(XSPI_Type, TGSFARS), offsetof(XSPI_Type, SUB_REG_MDAM_ARRAY[0].TGSFARS_SUB), \
118     }
119 
120 #define XSPI_TGIPCRS_REG_OFFSET                                                               \
121     {                                                                                         \
122         offsetof(XSPI_Type, TGIPCRS), offsetof(XSPI_Type, SUB_REG_MDAM_ARRAY[0].TGIPCRS_SUB), \
123     }
124 
125 #define XSPI_SFP_TG_IPCR_REG_OFFSET                                                                   \
126     {                                                                                                 \
127         offsetof(XSPI_Type, SFP_TG_IPCR), offsetof(XSPI_Type, SUB_REG_MDAM_ARRAY[0].SFP_TG_SUB_IPCR), \
128     }
129 
130 #define XSPI_SFP_TG_SFAR_REG_OFFSET                                                                   \
131     {                                                                                                 \
132         offsetof(XSPI_Type, SFP_TG_SFAR), offsetof(XSPI_Type, SUB_REG_MDAM_ARRAY[0].SFP_TG_SUB_SFAR), \
133     }
134 
135 #define XSPI_MCR_X16_MODE_MASK  (0x300000UL)
136 #define XSPI_MCR_X16_MODE_SHIFT (20UL)
137 #define XSPI_MCR_X16_MODE(x)    (((uint32_t)(x) << XSPI_MCR_X16_MODE_SHIFT) & XSPI_MCR_X16_MODE_MASK)
138 /*******************************************************************************
139  * Prototypes
140  ******************************************************************************/
141 static uint8_t XSPI_GetPPWBFromPageSize(uint32_t pageSize);
142 
143 /*******************************************************************************
144  * Variables
145  ******************************************************************************/
146 /*! brief Pointers to xspi bases for each instance. */
147 static XSPI_Type *const s_xspiBases[] = XSPI_BASE_PTRS;
148 
149 /*! brief Pointers to xspi IRQ number for each instance. */
150 static const IRQn_Type s_xspiIrqs[] = XSPI_IRQS;
151 
152 /*! brief Pointers to xspi amba base for each instance. */
153 static uint32_t s_xspiAmbaBase[] = XSPI_AMBA_BASES;
154 
155 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
156 /* Clock name array */
157 static const clock_ip_name_t s_xspiClock[] = XSPI_CLOCKS;
158 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
159 
160 /*! brief Pointers to XSPI resets for each instance. */
161 static const reset_ip_name_t s_xspiResets[] = XSPI_RSTS;
162 
163 static uint32_t s_tgSfarsRegOffset[]     = XSPI_TGSFARS_REG_OFFSET;
164 static uint32_t s_tgIpcrsRegOffset[]     = XSPI_TGIPCRS_REG_OFFSET;
165 static uint32_t s_sfpTgIpcrRegOffset[]   = XSPI_SFP_TG_IPCR_REG_OFFSET;
166 static uint32_t s_sfpTgIpSfarRegOffset[] = XSPI_SFP_TG_SFAR_REG_OFFSET;
167 static uint32_t s_tgMdadRegOffset[]      = XSPI_TG_MDAD_REG_OFFSET_ARRAY;
168 
169 /*******************************************************************************
170  * Code
171  ******************************************************************************/
172 /* To avoid the compiler optimizing this API into a memset() call in the library. */
173 #if defined(__ICCARM__)
174 #pragma optimize = none
175 #endif /* defined(__ICCARM__) */
176 
177 static uint8_t XSPI_GetPPWBFromPageSize(uint32_t pageSize)
178 {
179     uint8_t ppwbValue = 0U;
180 
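    /* The PPWB field encodes the page size as a power of two: pageSize = 2^ppwbValue
     * (64 bytes -> 6, 128 -> 7, 256 -> 8, 512 -> 9, 1024 bytes -> 10). */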
181     switch (pageSize)
182     {
183         case 64UL:
184         {
185             ppwbValue = 6U;
186             break;
187         }
188         case 128UL:
189         {
190             ppwbValue = 7U;
191             break;
192         }
193         case 256UL:
194         {
195             ppwbValue = 8U;
196             break;
197         }
198         case 512UL:
199         {
200             ppwbValue = 9U;
201             break;
202         }
203         case 1024UL:
204         {
205             ppwbValue = 10U;
206             break;
207         }
208         default:
209         {
210             /* Input pageSize is not supported. */
211             assert(false);
212             break;
213         }
214     }
215 
216     return ppwbValue;
217 }
218 
219 /******************* Initialization And Deinitialization Functional Interfaces Start ***********************/
220 /*!
221  * brief Get the instance number for XSPI.
222  *
223  * param base XSPI base pointer.
224  */
225 uint32_t XSPI_GetInstance(XSPI_Type *base)
226 {
227     uint32_t instance;
228 
229     /* Find the instance index from base address mappings. */
230     for (instance = 0; instance < ARRAY_SIZE(s_xspiBases); instance++)
231     {
232         if (MSDK_REG_SECURE_ADDR(s_xspiBases[instance]) == MSDK_REG_SECURE_ADDR(base))
233         {
234             break;
235         }
236     }
237 
238     assert(instance < ARRAY_SIZE(s_xspiBases));
239 
240     return instance;
241 }
242 
243 /*!
244  * brief Check and clear IP command execution errors.
245  *
246  * param base XSPI base pointer.
247  * param status interrupt status.
248  */
249 status_t XSPI_CheckAndClearError(XSPI_Type *base, uint32_t status)
250 {
251     status_t result = kStatus_Success;
252 
253     /* Check for error. */
254     status &= (uint32_t)kFlashProtectionErrorFlags;
255     if (0U != status)
256     {
257         /* Select the correct error code. */
258         if (0U != (status & (uint32_t)kXSPI_SequenceExecutionTimeoutFlag))
259         {
260             result = kStatus_XSPI_SequenceExecutionTimeout;
261             /* Clear the flags. */
262             base->ERRSTAT |= (uint32_t)kXSPI_SequenceExecutionTimeoutFlag;
263         }
264         else if (0U != ((status & (uint32_t)kXSPI_FradMatchErrorFlag) | (status & (uint32_t)kXSPI_FradnAccErrorFlag)))
265         {
266             result = kStatus_XSPI_FradCheckError;
267             /* Clear the flags. */
268             if (0U != (status & (uint32_t)kXSPI_FradMatchErrorFlag))
269             {
270                 base->ERRSTAT |= (uint32_t)kXSPI_FradMatchErrorFlag;
271             }
272             else
273             {
274                 base->ERRSTAT |= (uint32_t)kXSPI_FradnAccErrorFlag;
275             }
276         }
277         else if (0U != (status & (uint32_t)kXSPI_IpsErrorFlag))
278         {
279             result = kStatus_XSPI_IpsBusTransError;
280             /* Clear the flags. */
281             base->IPSERROR |= XSPI_IPSERROR_CLR_MASK;
282         }
283         else if (0U != ((status & (uint32_t)kXSPI_Tg0SfarErrorFlag) | (status & (uint32_t)kXSPI_Tg1SfarErrorFlag) |
284                         (status & (uint32_t)kXSPI_TgnIpcrErrorFlag)))
285         {
286             result = kStatus_XSPI_TgQueueWritingError;
287             /* Clear the flags. */
288             if (0U != ((status & (uint32_t)kXSPI_Tg0SfarErrorFlag) | (status & (uint32_t)kXSPI_Tg1SfarErrorFlag)))
289             {
290                 base->TGSFARS |= XSPI_TGSFARS_CLR_MASK;
291             }
292             else
293             {
294                 base->TGIPCRS |= XSPI_TGIPCRS_CLR_MASK;
295             }
296         }
297         else
298         {
299             assert(false);
300         }
301 
302         /* Reset fifos. These flags clear automatically. */
303         base->MCR |= XSPI_MCR_CLR_RXF_MASK;
304         base->MCR |= XSPI_MCR_CLR_TXF_MASK;
305     }
306 
307     return result;
308 }
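/*
 * Usage sketch (not part of the original driver): convert previously gathered error flags
 * into a driver status code. Where the flags come from is application specific; reading
 * ERRSTAT directly here is only an assumption made for illustration.
 *
 * code
 * status_t errStatus = XSPI_CheckAndClearError(XSPI0, XSPI0->ERRSTAT);
 * if (errStatus != kStatus_Success)
 * {
 *     // Abort or retry the failed IP command.
 * }
 * endcode
 */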
309 
310 /*!
311  * brief Initializes the XSPI module and internal state.
312  *
313  * This function configures the XSPI module with the
314  * input configuration parameters. Users should call this function before any XSPI operations.
315  *
316  * param base XSPI peripheral base address.
317  * param config XSPI configure structure.
318  */
319 
320 void XSPI_Init(XSPI_Type *base, const xspi_config_t *ptrConfig)
321 {
322     uint32_t tmp32 = 0UL;
323 
324 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
325     /* Enable the xspi clock */
326     (void)CLOCK_EnableClock(s_xspiClock[XSPI_GetInstance(base)]);
327 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
328     /* Reset the XSPI module */
329     RESET_ReleasePeripheralReset(s_xspiResets[XSPI_GetInstance(base)]);
330 
331     /* Reset SFM, AHB domain and TG queue. */
332     XSPI_SoftwareReset(base);
333 
334     /* Disable module before initialization. */
335     XSPI_EnableModule(base, false);
336 
337     /* Clear AHB buffer, TX buffer, RX buffer. */
338     XSPI_ClearAhbBuffer(base);
339     XSPI_ClearTxBuffer(base);
340     XSPI_ClearRxBuffer(base);
341 
342     /* If any flags are asserted, clear them first. */
343     tmp32 = base->FR;
344     if (tmp32 != 0UL)
345     {
346         base->FR = tmp32;
347     }
348 
349     XSPI_EnableDozeMode(base, ptrConfig->enableDoze);
350 
351     base->MCR = ((base->MCR) & (~XSPI_MCR_END_CFG_MASK)) | XSPI_MCR_END_CFG(ptrConfig->byteOrder);
352 
353     if (ptrConfig->ptrAhbAccessConfig != NULL)
354     {
355         (void)XSPI_SetAhbAccessConfig(base, ptrConfig->ptrAhbAccessConfig);
356     }
357 
358     if (ptrConfig->ptrIpAccessConfig != NULL)
359     {
360         (void)XSPI_SetIpAccessConfig(base, ptrConfig->ptrIpAccessConfig);
361     }
362 
363     /* Enable XSPI module. */
364     XSPI_EnableModule(base, true);
365 }
366 
367 /*!
368  * brief Gets default settings for XSPI.
369  *
370  * param ptrConfig XSPI configuration structure.
371  */
375 void XSPI_GetDefaultConfig(xspi_config_t *ptrConfig)
376 {
377     assert(ptrConfig != NULL);
378 
379     ptrConfig->byteOrder  = kXSPI_64BitLE;
380     ptrConfig->enableDoze = false;
381 
382     if (ptrConfig->ptrAhbAccessConfig != NULL)
383     {
384         /* If ptrAhbAccessConfig is not NULL, it means AHB access feature will be used. */
385         ptrConfig->ptrAhbAccessConfig->ahbAlignment                = kXSPI_AhbAlignmentNoLimit;
386         ptrConfig->ptrAhbAccessConfig->ahbErrorPayload.highPayload = 0UL;
387         ptrConfig->ptrAhbAccessConfig->ahbErrorPayload.lowPayload  = 0UL;
388         ptrConfig->ptrAhbAccessConfig->ahbSplitSize                = kXSPI_AhbSplitSizeDisabled;
389 
390         for (uint8_t i = 0U; i < XSPI_BUFCR_COUNT; i++)
391         {
392             ptrConfig->ptrAhbAccessConfig->buffer[i].masterId = i;
393             if (i == 3U)
394             {
395                 ptrConfig->ptrAhbAccessConfig->buffer[i].enaPri.enableAllMaster = true;
396             }
397             else
398             {
399                 ptrConfig->ptrAhbAccessConfig->buffer[i].enaPri.enablePriority = false;
400             }
401             ptrConfig->ptrAhbAccessConfig->buffer[i].bufferSize = 0x80U;
402 
403             ptrConfig->ptrAhbAccessConfig->buffer[i].ptrSubBuffer0Config = NULL;
404             ptrConfig->ptrAhbAccessConfig->buffer[i].ptrSubBuffer1Config = NULL;
405             ptrConfig->ptrAhbAccessConfig->buffer[i].ptrSubBuffer2Config = NULL;
406             ptrConfig->ptrAhbAccessConfig->buffer[i].ptrSubBuffer3Config = NULL;
407         }
408         ptrConfig->ptrAhbAccessConfig->ptrAhbWriteConfig         = NULL;
409         ptrConfig->ptrAhbAccessConfig->enableAHBPrefetch         = true;
410         ptrConfig->ptrAhbAccessConfig->enableAHBBufferWriteFlush = false;
411         ptrConfig->ptrAhbAccessConfig->ARDSeqIndex               = 0U;
412     }
413 
414     if (ptrConfig->ptrIpAccessConfig != NULL)
415     {
416         /* If ptrIpAccessConfig is not NULL, it means the IP access feature will be used. */
417         /* In default settings, FRAD check and MDAD check are both disabled. */
418         ptrConfig->ptrIpAccessConfig->ptrSfpMdadConfig               = NULL;
419         ptrConfig->ptrIpAccessConfig->ptrSfpFradConfig               = NULL;
420         ptrConfig->ptrIpAccessConfig->sfpArbitrationLockTimeoutValue = 0xFFFFFUL;
421         ptrConfig->ptrIpAccessConfig->ipAccessTimeoutValue           = 0xFFFFUL;
422     }
423 }
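/*
 * Usage sketch (not part of the original driver): XSPI_GetDefaultConfig() only fills the
 * AHB/IP access sub-structures when the corresponding pointers are not NULL, so the caller
 * attaches its own storage first. The sub-structure type names below are assumed to match
 * the declarations in fsl_xspi.h.
 *
 * code
 * xspi_config_t config;
 * xspi_ahb_access_config_t ahbAccessConfig;
 * xspi_ip_access_config_t ipAccessConfig;
 *
 * config.ptrAhbAccessConfig = &ahbAccessConfig;
 * config.ptrIpAccessConfig  = &ipAccessConfig;
 * XSPI_GetDefaultConfig(&config);
 * XSPI_Init(XSPI0, &config);
 * endcode
 */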
424 
425 /*!
426  * brief Deinitialize the XSPI module.
427  *
428  * Clears the XSPI state and XSPI module registers.
429  * param base XSPI peripheral base address.
430  */
431 void XSPI_Deinit(XSPI_Type *base)
432 {
433     XSPI_EnableModule(base, false);
434     /* Reset peripheral. */
435     XSPI_SoftwareReset(base);
436 }
437 
438 /*! brief Updates the LUT table.
439  *
440  * param base XSPI peripheral base address.
441  * param index The LUT index from which to start updating. It can be any index of the LUT table, which
442  * also allows the user to update instruction content inside a command. Each command consists of up to
443  * 10 instructions and occupies 4 * 32-bit words of LUT memory.
444  * param cmd Command sequence array.
445  * param count Number of 32-bit words in the command sequence array.
446  */
447 void XSPI_UpdateLUT(XSPI_Type *base, uint8_t index, const uint32_t *cmd, uint8_t count)
448 {
449     assert(index < 80U);
450 
451     uint32_t i = 0;
452     volatile uint32_t *lutBase;
453 
454     /* Wait for bus to be idle before changing flash configuration. */
455     while (!XSPI_GetBusIdleStatus(base))
456     {
457     }
458 
459     /* Unlock LUT for update. */
460     base->LUTKEY = XSPI_LUT_KEY_VAL;
461     base->LCKCR  = 0x02;
462 
463     lutBase = &base->LUT[index];
464     for (i = 0; i < count; i++)
465     {
466         *lutBase++ = *cmd++;
467     }
468 
469     /* Lock LUT. */
470     base->LUTKEY = XSPI_LUT_KEY_VAL;
471     base->LCKCR  = 0x01;
472 }
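/*
 * Usage sketch (not part of the original driver): reprogram one command sequence. The
 * instruction words are placeholders; a real sequence is built from the LUT instruction
 * encoding in fsl_xspi.h. Index 16 assumes the 4-words-per-command layout described above
 * (command 4 starts at LUT word 4 * 4).
 *
 * code
 * uint32_t readSeq[4] = {0UL, 0UL, 0UL, 0UL}; // placeholder instruction words
 * XSPI_UpdateLUT(XSPI0, 16U, readSeq, 4U);
 * endcode
 */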
473 
474 /******************* Initialization And Deinitialization Functional Interfaces End ***********************/
475 
476 /***************************** XSPI Controller Low-Level Interfaces Start ********************************/
477 /*!
478  * brief Reset Serial flash memory domain and AHB domain at the same time.
479  *
480  * note Resetting only one of the serial flash memory domain or the AHB domain may cause undesirable side effects.
481  *
482  * param base XSPI peripheral base address.
483  */
484 void XSPI_ResetSfmAndAhbDomain(XSPI_Type *base)
485 {
486     /* The XSPI module must be enabled before asserting the reset to the domains. */
487     XSPI_EnableModule(base, true);
488 
489     base->MCR |= XSPI_MCR_SWRSTSD_MASK | XSPI_MCR_SWRSTHD_MASK;
490     for (uint8_t i = 0U; i < 6U; i++)
491     {
492         __NOP();
493     }
494     /* Before de-asserting the software reset, the XSPI module should be disabled. */
495     XSPI_EnableModule(base, false);
496     base->MCR &= ~(XSPI_MCR_SWRSTSD_MASK | XSPI_MCR_SWRSTHD_MASK);
497     for (uint8_t i = 0U; i < 6U; i++)
498     {
499         __NOP();
500     }
501     /* After de-asserting the software resets, the XSPI module should be enabled. */
502     XSPI_EnableModule(base, true);
503 }
504 
505 /***************************** XSPI Controller Low-Level Interfaces End ********************************/
506 
507 /***************************** External Device Control Low-Level Interfaces Start ********************************/
508 /*!
509  * brief Set Hyper bus X16 mode.
510  *
511  * param base XSPI peripheral base address.
512  * param x16Mode Specify X16 mode.
513  */
514 void XSPI_SetHyperBusX16Mode(XSPI_Type *base, xspi_hyper_bus_x16_mode_t x16Mode)
515 {
516     bool isEnabled = false;
517 
518     if (XSPI_CheckModuleEnabled(base))
519     {
520         isEnabled = true;
521         XSPI_EnableModule(base, false);
522     }
523 
524     base->MCR = ((base->MCR) & (~XSPI_MCR_X16_MODE_MASK)) | XSPI_MCR_X16_MODE(x16Mode);
525 
526     if (isEnabled)
527     {
528         XSPI_EnableModule(base, true);
529     }
530 }
531 
532 /*!
533  * brief Update DLL configuration.
534  *
535  * param[in] base XSPI peripheral base address.
536  * param[in] ptrDllConfig Pointer to the DLL configuration.
537  * param[in] enableDDR DDR mode is enabled or not.
538  * param[in] enableX16Mode X16 mode is enabled or not.
539  * param[in] xspiRootClk The frequency of xspi root clock, the unit is Hz.
540  */
541 void XSPI_UpdateDllValue(XSPI_Type *base, xspi_dll_config_t *ptrDllConfig,
542                         bool enableDDR, bool enableX16Mode, uint32_t xspiRootClk)
543 {
544     uint32_t offsetDelayElementCount = 0UL;
545     uint8_t tapNum              = ptrDllConfig->dllCustomDelayTapNum;
546     bool enableCourseDelayLine8 = ptrDllConfig->enableCdl8;
547 
548     if (ptrDllConfig->useRefValue == true)
549     {
550         if (enableDDR)
551         {
552             tapNum                 = FSL_FEATURE_XSPI_DLL_REF_VALUE_DDR_DELAY_TAP_NUM;
553             enableCourseDelayLine8 = true;
554         }
555         else
556         {
557             tapNum                 = FSL_FEATURE_XSPI_DLL_REF_VALUE_SDR_DELAY_TAP_NUM;
558             enableCourseDelayLine8 = false;
559         }
560     }
561 
562     base->MCR |= XSPI_MCR_MDIS_MASK;
563     base->SMPR = (((base->SMPR) & (~XSPI_SMPR_DLLFSMPFA_MASK)) | XSPI_SMPR_DLLFSMPFA(tapNum));
564     base->MCR &= ~XSPI_MCR_MDIS_MASK;
565 
566     if (ptrDllConfig->dllMode == kXSPI_BypassMode)
567     {
568         /* The DLL is configured in bypass mode. */
569         uint32_t delayElementCoarseAdjust = 0UL;
570         uint32_t delayElementFineAdjust   = 0UL;
571 
572         offsetDelayElementCount  = (uint32_t)((ptrDllConfig->useRefValue) ?
573                                        FSL_FEATURE_XSPI_DLL_REF_VALUE_BYPASS_OFFSET_DELAY_ELEMENT_COUNT :
574                                        (ptrDllConfig->dllCustomPara.bypassModePara.offsetDelayElementCount));
575         delayElementCoarseAdjust = (uint32_t)((ptrDllConfig->useRefValue) ?
576                                        FSL_FEATURE_XSPI_DLL_REF_VALUE_BYPASS_DELAY_ELEMENT_COARSE :
577                                        (ptrDllConfig->dllCustomPara.bypassModePara.delayElementCoarseValue));
578         delayElementFineAdjust   = (uint32_t)((ptrDllConfig->useRefValue) ?
579                                        FSL_FEATURE_XSPI_DLL_REF_VALUE_BYPASS_DELAY_ELEMENT_FINE :
580                                        (ptrDllConfig->dllCustomPara.bypassModePara.delayElementFineValue));
581 
582         base->DLLCR[0] &=
583             ~(XSPI_DLLCR_SLAVE_AUTO_UPDT_MASK | XSPI_DLLCR_SLV_DLY_FINE_MASK | XSPI_DLLCR_SLV_DLY_COARSE_MASK |
584               XSPI_DLLCR_SLV_FINE_OFFSET_MASK | XSPI_DLLCR_DLL_CDL8_MASK);
585         /* Enable subordinate delay chain as bypass mode. */
586         base->DLLCR[0] |= XSPI_DLLCR_SLV_EN_MASK | XSPI_DLLCR_SLV_DLL_BYPASS_MASK;
587         /* program DLL to desired delay. */
588         base->DLLCR[0] |=
589             XSPI_DLLCR_SLV_DLY_FINE(delayElementFineAdjust) | XSPI_DLLCR_SLV_DLY_COARSE(delayElementCoarseAdjust) |
590             XSPI_DLLCR_SLV_FINE_OFFSET(offsetDelayElementCount) | XSPI_DLLCR_DLL_CDL8(enableCourseDelayLine8);
591         /* Load above settings into delay chain. */
592         base->DLLCR[0] |= XSPI_DLLCR_SLV_UPD_MASK;
593 
594         while ((base->DLLSR & XSPI_DLLSR_SLVA_LOCK_MASK) == 0UL)
595         {
596         }
597         base->DLLCR[0] &= ~XSPI_DLLCR_SLV_UPD_MASK;
598     }
599     else
600     {
601         /* The DLL is configured in auto-update mode. */
602         bool enableHighFreq                   = false;
603         uint32_t refCounterValue               = 0UL;
604         uint32_t resolutionValue               = 0UL;
605         uint32_t tDiv16OffsetDelayElementCount = 0UL;
606 
607         enableHighFreq =
608             (ptrDllConfig->useRefValue) ?
609                 ((xspiRootClk >= FSL_FEATURE_XSPI_DLL_REF_VALUE_AUTOUPDATE_FREQ_THRESHOLD) ? true : false) :
610                 ((bool)(ptrDllConfig->dllCustomPara.autoUpdateModoPara.enableHighFreq));
611         refCounterValue = (uint32_t)((ptrDllConfig->useRefValue) ?
612                               ((enableX16Mode) ? FSL_FEATURE_XSPI_DLL_REF_VALUE_AUTOUPDATE_X16_ENABLED_REF_COUNTER :
613                                                  FSL_FEATURE_XSPI_DLL_REF_VALUE_AUTOUPDATE_X16_DISABLED_REF_COUNTER) :
614                               (ptrDllConfig->dllCustomPara.autoUpdateModoPara.referenceCounter));
615         resolutionValue = (uint32_t)((ptrDllConfig->useRefValue) ?
616                               ((enableX16Mode) ? FSL_FEATURE_XSPI_DLL_REF_VALUE_AUTOUPDATE_X16_ENABLED_RES :
617                                                  FSL_FEATURE_XSPI_DLL_REF_VALUE_AUTOUPDATE_X16_DISABLE_RES) :
618                               (ptrDllConfig->dllCustomPara.autoUpdateModoPara.resolution));
619         tDiv16OffsetDelayElementCount =
620             (uint32_t)((ptrDllConfig->useRefValue) ?
621                 FSL_FEATURE_XSPI_DLL_REF_VALUE_AUTOUPDATE_T_DIV16_OFFSET_DELAY_ELEMENT_COUNT :
622                 (ptrDllConfig->dllCustomPara.autoUpdateModoPara.tDiv16OffsetDelayElementCount));
623         offsetDelayElementCount = (uint32_t)((ptrDllConfig->useRefValue) ?
624                                       FSL_FEATURE_XSPI_DLL_REF_VALUE_AUTOUPDATE_OFFSET_DELAY_ELEMENT_COUNT :
625                                       (ptrDllConfig->dllCustomPara.autoUpdateModoPara.offsetDelayElementCount));
626 
627         base->DLLCR[0] &= ~(XSPI_DLLCR_SLV_DLL_BYPASS_MASK | XSPI_DLLCR_DLL_CDL8_MASK | XSPI_DLLCR_SLV_DLY_OFFSET_MASK |
628                             XSPI_DLLCR_SLV_FINE_OFFSET_MASK | XSPI_DLLCR_DLLRES_MASK | XSPI_DLLCR_DLL_REFCNTR_MASK |
629                             XSPI_DLLCR_FREQEN_MASK);
630         /* Enable subordinate as auto update mode. */
631         base->DLLCR[0] |= XSPI_DLLCR_SLV_EN_MASK | XSPI_DLLCR_SLAVE_AUTO_UPDT_MASK;
632         /* program DLL to desired delay. */
633         base->DLLCR[0] |= XSPI_DLLCR_DLLRES(resolutionValue) | XSPI_DLLCR_DLL_REFCNTR(refCounterValue) |
634                           XSPI_DLLCR_SLV_FINE_OFFSET(offsetDelayElementCount) |
635                           XSPI_DLLCR_SLV_DLY_OFFSET(tDiv16OffsetDelayElementCount) | XSPI_DLLCR_FREQEN(enableHighFreq);
636         /* Load above settings into delay chain. */
637         base->DLLCR[0] |= XSPI_DLLCR_SLV_UPD_MASK;
638         base->DLLCR[0] |= XSPI_DLLCR_DLLEN_MASK;
639         base->DLLCR[0] &= ~XSPI_DLLCR_SLV_UPD_MASK;
640         while ((base->DLLSR & XSPI_DLLSR_SLVA_LOCK_MASK) == 0UL)
641         {
642         }
643     }
644 }
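/*
 * Usage sketch (not part of the original driver): apply the driver's reference DLL values
 * in bypass mode for an SDR device. The 50 MHz root clock is an arbitrary illustrative
 * value; normally this function is invoked through XSPI_SetDeviceConfig().
 *
 * code
 * xspi_dll_config_t dllConfig = {0};
 * dllConfig.dllMode     = kXSPI_BypassMode;
 * dllConfig.useRefValue = true;
 * XSPI_UpdateDllValue(XSPI0, &dllConfig, false, false, 50000000UL);
 * endcode
 */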
645 
646 /*!
647  * brief Set Data learning configurations.
648  *
649  * param[in] base XSPI peripheral base address.
650  * param[in] ptrDataLearningConfig Pointer to data learning configuration.
651  *
652  * retval kStatus_XSPI_AhbReadAccessAsserted Failed to set the data learning configuration because an AHB read access is asserted.
653  * retval kStatus_XSPI_IPAccessAsserted Failed to set the data learning configuration because an IP access is already asserted.
654  * retval kStatus_XSPI_AhbWriteAccessAsserted Failed to set the data learning configuration because an AHB write access is asserted.
655  * retval kStatus_Success Successfully set the data learning configuration.
656  */
657 status_t XSPI_SetDataLearningConfig(XSPI_Type *base, xspi_data_learning_config_t *ptrDataLearningConfig)
658 {
659     assert(ptrDataLearningConfig != NULL);
660 
661     if (XSPI_CheckAhbReadAccessAsserted(base))
662     {
663         return kStatus_XSPI_AhbReadAccessAsserted;
664     }
665 
666     if (XSPI_CheckIPAccessAsserted(base))
667     {
668         return kStatus_XSPI_IPAccessAsserted;
669     }
670 
671     if (XSPI_CheckAhbWriteAccessAsserted(base))
672     {
673         return kStatus_XSPI_AhbWriteAccessAsserted;
674     }
675 
676     base->DLCR = ((base->DLCR) & ~(XSPI_DLCR_DLP_SEL_FA_MASK | XSPI_DLCR_DL_NONDLP_FLSH_MASK)) |
677                  (XSPI_DLCR_DLP_SEL_FA(ptrDataLearningConfig->padSelected) |
678                   XSPI_DLCR_DL_NONDLP_FLSH(ptrDataLearningConfig->deviceSupported));
679 
680     base->DLPR = ptrDataLearningConfig->pattern;
681 
682     return kStatus_Success;
683 }
684 
685 /*!
686  * brief Update address mode to access external device.
687  *
688  * param base XSPI peripheral base address.
689  * param addrMode Specify the address mode to update.
690  *
691  * retval kStatus_XSPI_AhbReadAccessAsserted Failed to update the address mode because an AHB read access is asserted.
692  * retval kStatus_XSPI_IPAccessAsserted Failed to update the address mode because an IP access is already asserted.
693  * retval kStatus_XSPI_AhbWriteAccessAsserted Failed to update the address mode because an AHB write access is asserted.
694  * retval kStatus_Success Successfully updated the address mode.
695  */
696 status_t XSPI_UpdateDeviceAddrMode(XSPI_Type *base, xspi_device_addr_mode_t addrMode)
697 {
698     if (XSPI_CheckAhbReadAccessAsserted(base))
699     {
700         return kStatus_XSPI_AhbReadAccessAsserted;
701     }
702 
703     if (XSPI_CheckIPAccessAsserted(base))
704     {
705         return kStatus_XSPI_IPAccessAsserted;
706     }
707 
708     if (XSPI_CheckAhbWriteAccessAsserted(base))
709     {
710         return kStatus_XSPI_AhbWriteAccessAsserted;
711     }
712 
713     if (addrMode != kXSPI_Device4ByteAddressable)
714     {
715         base->SFACR =
716             (base->SFACR & ~(XSPI_SFACR_WA_MASK | XSPI_SFACR_WA_4B_EN_MASK)) | XSPI_SFACR_WA(addrMode);
717     }
718     else
719     {
720         base->SFACR &= ~XSPI_SFACR_WA_MASK;
721         base->SFACR |= XSPI_SFACR_WA_4B_EN_MASK;
722     }
723 
724     return kStatus_Success;
725 }
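/*
 * Usage sketch (not part of the original driver): switch the attached device to 4-byte
 * addressing once no AHB/IP access is in flight.
 *
 * code
 * if (XSPI_UpdateDeviceAddrMode(XSPI0, kXSPI_Device4ByteAddressable) != kStatus_Success)
 * {
 *     // An AHB or IP access is still asserted; retry after the bus is idle.
 * }
 * endcode
 */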
726 
727 /***************************** External Device Control Low-Level Interfaces End ********************************/
728 
729 /***************************** External Device Control Functional Interfaces Start ********************************/
730 /*!
731  * brief Set device configuration.
732  *
733  * param[in] base XSPI peripheral base address.
734  * param[in] devConfig Pointer to device configuration.
735  *
736  * retval kStatus_XSPI_AhbReadAccessAsserted Failed to set the device configuration because an AHB read access is asserted.
737  * retval kStatus_XSPI_IPAccessAsserted Failed to set the device configuration because an IP access is already asserted.
738  * retval kStatus_XSPI_AhbWriteAccessAsserted Failed to set the device configuration because an AHB write access is asserted.
739  * retval kStatus_Success Successfully set the device configuration.
740  */
741 status_t XSPI_SetDeviceConfig(XSPI_Type *base, xspi_device_config_t *devConfig)
742 {
743     assert(devConfig != NULL);
744 
745     bool enableDDR     = false;
746     bool enableX16Mode = false;
747     uint32_t instance  = XSPI_GetInstance(base);
748 
749     if (XSPI_CheckIPAccessAsserted(base))
750     {
751         return kStatus_XSPI_IPAccessAsserted;
752     }
753 
754     if (XSPI_CheckAhbReadAccessAsserted(base))
755     {
756         return kStatus_XSPI_AhbReadAccessAsserted;
757     }
758 
759     if (XSPI_CheckAhbWriteAccessAsserted(base))
760     {
761         return kStatus_XSPI_AhbWriteAccessAsserted;
762     }
763 
764     /* Set sample clock config. */
765     bool isEnabled = false;
766 
767     if (XSPI_CheckModuleEnabled(base))
768     {
769         isEnabled = true;
770         XSPI_EnableModule(base, false);
771     }
772 
773     if (devConfig->enableCknPad)
774     {
775         base->MCR |= XSPI_MCR_CKN_FA_EN_MASK;
776     }
777     else
778     {
779         base->MCR &= ~XSPI_MCR_CKN_FA_EN_MASK;
780     }
781 
782     if (devConfig->deviceInterface == kXSPI_StrandardExtendedSPI)
783     {
784         uint8_t tmp8 = XSPI_GetPPWBFromPageSize(devConfig->interfaceSettings.strandardExtendedSPISettings.pageSize);
785         base->MCR &= ~XSPI_MCR_DQS_OUT_EN_MASK;
786         base->SFACR = (base->SFACR & ~XSPI_SFACR_PPWB_MASK) | XSPI_SFACR_PPWB(tmp8);
787     }
788     else
789     {
790         XSPI_SetHyperBusX16Mode(base, devConfig->interfaceSettings.hyperBusSettings.x16Mode);
791         if (devConfig->interfaceSettings.hyperBusSettings.x16Mode != kXSPI_x16ModeDisable)
792         {
793             enableX16Mode = true;
794         }
795         XSPI_EnableVariableLatency(base, devConfig->interfaceSettings.hyperBusSettings.enableVariableLatency);
796         base->MCR |= XSPI_MCR_DQS_OUT_EN_MASK;
797         base->SFACR = (base->SFACR & ~XSPI_SFACR_FORCE_A10_MASK) |
798                       XSPI_SFACR_FORCE_A10(devConfig->interfaceSettings.hyperBusSettings.forceBit10To1);
799     }
800 
801     base->FLSHCR = ((base->FLSHCR) & ~(XSPI_FLSHCR_TCSS_MASK | XSPI_FLSHCR_TCSH_MASK)) |
802                    (XSPI_FLSHCR_TCSS(devConfig->CSSetupTime) | XSPI_FLSHCR_TCSH(devConfig->CSHoldTime));
803 
804     if (devConfig->addrMode != kXSPI_Device4ByteAddressable)
805     {
806         base->SFACR =
807             (base->SFACR & ~(XSPI_SFACR_WA_MASK | XSPI_SFACR_WA_4B_EN_MASK)) | XSPI_SFACR_WA(devConfig->addrMode);
808     }
809     else
810     {
811         base->SFACR |= XSPI_SFACR_WA_4B_EN_MASK;
812     }
813 
814     base->SFACR =
815         (base->SFACR & ~(XSPI_SFACR_CAS_INTRLVD_MASK | XSPI_SFACR_CAS_MASK)) |
816         (XSPI_SFACR_CAS_INTRLVD(devConfig->enableCASInterleaving) | XSPI_SFACR_CAS(devConfig->columnAddrWidth));
817 
818     if (devConfig->ptrDeviceRegInfo != NULL)
819     {
820         (void)XSPI_SetSFMStatusRegInfo(base, devConfig->ptrDeviceRegInfo);
821     }
822 
823     if (devConfig->ptrDeviceDdrConfig != NULL)
824     {
825         enableDDR = devConfig->ptrDeviceDdrConfig->enableDdr;
826     }
827 
828     if (enableDDR)
829     {
830         base->FLSHCR = ((base->FLSHCR) & ~XSPI_FLSHCR_TDH_MASK) |
831                        XSPI_FLSHCR_TDH(devConfig->ptrDeviceDdrConfig->ddrDataAlignedClk);
832         base->SFACR = ((base->SFACR) & ~XSPI_SFACR_BYTE_SWAP_MASK) |
833                       XSPI_SFACR_BYTE_SWAP(devConfig->ptrDeviceDdrConfig->enableByteSwapInOctalMode);
834         base->MCR |= XSPI_MCR_DDR_EN_MASK;
835     }
836     else
837     {
838         base->MCR &= ~XSPI_MCR_DDR_EN_MASK;
839     }
840 
841     for (uint8_t i = 0U; i < XSPI_SFAD_COUNT2; i++)
842     {
843         base->SFAD[0][i] = s_xspiAmbaBase[instance] + XSPI_SFAD_TPAD(devConfig->deviceSize[i]);
844     }
845 
846     uint32_t tmp32 = (base->MCR) & ~(XSPI_MCR_DQS_FA_SEL_MASK | XSPI_MCR_DQS_EN_MASK | XSPI_MCR_DQS_LAT_EN_MASK);
847 
848     if ((devConfig->sampleClkConfig.sampleClkSource == kXSPI_SampleClkFromDqsPadLoopback) ||
849         (devConfig->sampleClkConfig.sampleClkSource == kXSPI_SampleClkFromExternalDQS))
850     {
851         tmp32 |= XSPI_MCR_DQS_EN_MASK;
852     }
853     tmp32 |= XSPI_MCR_DQS_LAT_EN(devConfig->sampleClkConfig.enableDQSLatency) |
854              XSPI_MCR_DQS_FA_SEL((uint32_t)(devConfig->sampleClkConfig.sampleClkSource) & 0x3UL);
855     base->MCR = tmp32;
856 
857     base->SMPR &= ~(XSPI_SMPR_FSPHS_MASK | XSPI_SMPR_FSDLY_MASK);
858 
859     if (devConfig->sampleClkConfig.sampleClkSource == kXSPI_SampleClkFromInvertedFullySpeedDummyPadLoopback)
860     {
861         base->SMPR |= XSPI_SMPR_FSPHS_MASK;
862     }
863 
864     if (devConfig->sampleClkConfig.sampleClkSource == kXSPI_SampleClkFromHalfSpeedDummyPadLoopback)
865     {
866         base->SMPR |= XSPI_SMPR_FSDLY_MASK;
867     }
868 
869     XSPI_UpdateDllValue(base, &(devConfig->sampleClkConfig.dllConfig), enableDDR, enableX16Mode,
870                         devConfig->xspiRootClk);
871 
872     if (isEnabled)
873     {
874         XSPI_EnableModule(base, true);
875     }
876 
877     return kStatus_Success;
878 }
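/*
 * Usage sketch (not part of the original driver): the device configuration is normally
 * prepared by board/flash support code; only the call pattern is shown here, and the
 * external deviceConfig variable is an assumption made for illustration.
 *
 * code
 * extern xspi_device_config_t deviceConfig; // filled by board support code (assumption)
 * if (XSPI_SetDeviceConfig(XSPI0, &deviceConfig) != kStatus_Success)
 * {
 *     // An AHB or IP access is still asserted; retry after the bus is idle.
 * }
 * endcode
 */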
879 /***************************** External Device Control Functional Interfaces End ********************************/
880 
881 /***************************** IPS Access Control Low-Level Interfaces Start ********************************/
882 /*!
883  * brief Update watermark for RX buffer.
884  *
885  * code
886  * Set watermark as 4 bytes:
887  * XSPI_UpdateRxBufferWaterMark(XSPI0, 4UL);
888  * Set watermark as 8 bytes:
889  * XSPI_UpdateRxBufferWaterMark(XSPI0, 8UL);
890  * endcode
891  *
892  *
893  * param[in] base XSPI peripheral base address.
894  * param[in] waterMark Specify the number of bytes in the RX buffer which causes XSPI to assert the watermark exceeded
895  * flag; must be a multiple of 4 bytes.
896  *
897  * retval kStatus_XSPI_IPAccessAsserted Failed to update the RX buffer watermark because an IP access is asserted.
898  * retval kStatus_XSPI_WaterMarkIllegal Failed to update the RX buffer watermark because the input watermark is not a
899  * multiple of 4 bytes.
900  * retval kStatus_Success Successfully updated the watermark.
901  */
902 status_t XSPI_UpdateRxBufferWaterMark(XSPI_Type *base, uint32_t waterMark)
903 {
904     assert(waterMark != 0UL);
905     assert(waterMark <= 256UL);
906 
907     if (XSPI_CheckIPAccessAsserted(base))
908     {
909         return kStatus_XSPI_IPAccessAsserted;
910     }
911 
912     if ((waterMark % 4UL) != 0UL)
913     {
914         return kStatus_XSPI_WaterMarkIllegal;
915     }
916 
917     base->RBCT = XSPI_RBCT_WMRK((waterMark / 4UL) - 1UL);
918 
919     return kStatus_Success;
920 }
921 
922 /*!
923  * brief Update watermark for TX buffer.
924  *
925  * param[in] base XSPI peripheral base address.
926  * param[in] waterMark The watermark to set, in bytes; must be a multiple of 4 bytes.
927  *
928  * retval kStatus_XSPI_IPAccessAsserted Failed to update the TX buffer watermark because an IP access is asserted.
929  * retval kStatus_XSPI_WaterMarkIllegal Failed to update the TX buffer watermark because the input watermark is not a
930  * multiple of 4 bytes.
931  * retval kStatus_Success Successfully updated the watermark.
932  */
933 status_t XSPI_UpdateTxBufferWaterMark(XSPI_Type *base, uint32_t waterMark)
934 {
935     assert(waterMark != 0UL);
936     if (XSPI_CheckIPAccessAsserted(base))
937     {
938         return kStatus_XSPI_IPAccessAsserted;
939     }
940 
941     if ((waterMark % 4UL) != 0UL)
942     {
943         return kStatus_XSPI_WaterMarkIllegal;
944     }
945 
946     base->TBCT = XSPI_TBCT_WMRK((waterMark / 4UL) - 1UL);
947 
948     return kStatus_Success;
949 }
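/*
 * Usage sketch (not part of the original driver), mirroring the RX watermark example above:
 *
 * code
 * Set the TX buffer watermark as 8 bytes:
 * (void)XSPI_UpdateTxBufferWaterMark(XSPI0, 8UL);
 * endcode
 */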
950 
951 /*!
952  * brief Set the exclusive access lock mode for the specific FRAD.
953  *
954  * param[in] base XSPI peripheral base address.
955  * param[in] ealMode Specify the exclusive access lock mode.
956  * param[in] fradId Specify the FRAD ID.
957  */
958 void XSPI_SetSFPFradEALMode(XSPI_Type *base, xspi_exclusive_access_lock_mode_t ealMode, uint8_t fradId)
959 {
960     uint32_t fradWord3RegOffset[] = XSPI_FRAD_WORD3_REG_OFFSET_ARRAY;
961 
962     uint32_t fradWord3RegAddr = (uint32_t)base + fradWord3RegOffset[fradId];
963 
964     xspi_exclusive_access_lock_mode_t curEalMode =
965         (xspi_exclusive_access_lock_mode_t)(uint32_t)(((*(uint32_t *)fradWord3RegAddr) & XSPI_FRAD0_WORD3_EAL_MASK) >>
966                                                       XSPI_FRAD0_WORD3_EAL_SHIFT);
967 
968     if (curEalMode != ealMode)
969     {
970         if (curEalMode == kXSPI_ExclusiveAccessLockEnabled)
971         {
972             *(uint32_t *)fradWord3RegAddr = (*(uint32_t *)fradWord3RegAddr & ~XSPI_FRAD0_WORD3_EAL_MASK) |
973                                             XSPI_FRAD0_WORD3_EAL(kXSPI_ExclusiveAccessLockExceptMasterId);
974 
975             if (ealMode == kXSPI_ExclusiveAccessLockDisabled)
976             {
977                 *(uint32_t *)fradWord3RegAddr = (*(uint32_t *)fradWord3RegAddr & ~XSPI_FRAD0_WORD3_EAL_MASK);
978             }
979         }
980         else
981         {
982             *(uint32_t *)fradWord3RegAddr =
983                 (*(uint32_t *)fradWord3RegAddr & ~XSPI_FRAD0_WORD3_EAL_MASK) | XSPI_FRAD0_WORD3_EAL(ealMode);
984         }
985     }
986 }
987 
988 /*!
989  * brief Update SFP configurations, including MDAD configurations and FRAD configurations.
990  *
991  * param[in] base XSPI peripheral base address.
992  * param[in] ptrSfpMdadConfig Pointer to the SFP MDAD configuration.
993  * param[in] ptrSfpFradConfig Pointer to the SFP FRAD configuration.
994  */
995 void XSPI_UpdateSFPConfig(XSPI_Type *base,
996                           xspi_sfp_mdad_config_t *ptrSfpMdadConfig,
997                           xspi_sfp_frad_config_t *ptrSfpFradConfig)
998 {
999     bool enableSFP  = false;
1000     bool enableMdad = false;
1001     bool enableFrad = false;
1002     uint8_t i       = 0U;
1003 
1004     enableMdad = (ptrSfpMdadConfig != NULL) ? true : false;
1005     enableFrad = (ptrSfpFradConfig != NULL) ? true : false;
1006     enableSFP  = (bool)(enableMdad | enableFrad);
1007 
1008     if (enableSFP)
1009     {
1010         if (enableMdad)
1011         {
1012             base->MGC |= (XSPI_MGC_GVLDMDAD_MASK | XSPI_MGC_GVLD_MASK);
1013             uint32_t tgMdadRegAddr     = 0UL;
1014 
1015             for (i = 0U; i < XSPI_TARGET_GROUP_COUNT; i++)
1016             {
1017                 tgMdadRegAddr = (uint32_t)base + s_tgMdadRegOffset[i];
1018 
1019                 *(uint32_t *)tgMdadRegAddr = XSPI_TG0MDAD_MIDMATCH(ptrSfpMdadConfig->tgMdad[i].masterIdReference) |
1020                                              XSPI_TG0MDAD_MASKTYPE(ptrSfpMdadConfig->tgMdad[i].maskType) |
1021                                              XSPI_TG0MDAD_MASK(ptrSfpMdadConfig->tgMdad[i].mask) |
1022                                              XSPI_TG0MDAD_VLD(ptrSfpMdadConfig->tgMdad[i].assignIsValid) |
1023                                              XSPI_TG0MDAD_SA(ptrSfpMdadConfig->tgMdad[i].secureAttribute);
1024 
1025                 if (ptrSfpMdadConfig->tgMdad[i].enableDescriptorLock)
1026                 {
1027                     *(uint32_t *)tgMdadRegAddr |= XSPI_TG0MDAD_LCK_MASK;
1028                 }
1029             }
1030         }
1031 
1032         if (enableFrad)
1033         {
1034             base->MGC |= (XSPI_MGC_GVLDFRAD_MASK | XSPI_MGC_GVLD_MASK);
1035             uint32_t fradWord0RegOffset[] = XSPI_FRAD_WORD0_REG_OFFSET_ARRAY;
1036             uint32_t fradWord1RegOffset[] = XSPI_FRAD_WORD1_REG_OFFSET_ARRAY;
1037             uint32_t fradWord2RegOffset[] = XSPI_FRAD_WORD2_REG_OFFSET_ARRAY;
1038             uint32_t fradWord3RegOffset[] = XSPI_FRAD_WORD3_REG_OFFSET_ARRAY;
1039 
1040             uint32_t fradWord0RegAddr = 0UL;
1041             uint32_t fradWord1RegAddr = 0UL;
1042             uint32_t fradWord2RegAddr = 0UL;
1043             uint32_t fradWord3RegAddr = 0UL;
1044 
1045             for (i = 0U; i < XSPI_SFP_FRAD_COUNT; i++)
1046             {
1047                 fradWord0RegAddr = (uint32_t)base + fradWord0RegOffset[i];
1048                 fradWord1RegAddr = (uint32_t)base + fradWord1RegOffset[i];
1049                 fradWord2RegAddr = (uint32_t)base + fradWord2RegOffset[i];
1050                 fradWord3RegAddr = (uint32_t)base + fradWord3RegOffset[i];
1051 
1052                 /* Set the most-significant 16 bits of the starting address (64-KB alignment). */
1053                 *(uint32_t *)fradWord0RegAddr = ptrSfpFradConfig->fradConfig[i].startAddress & 0xFFFF0000UL;
1054                 /* Set the most-significant 16 bits of the ending address (64-KB alignment). */
1055                 *(uint32_t *)fradWord1RegAddr = ptrSfpFradConfig->fradConfig[i].endAddress & 0xFFFF0000UL;
1056 
1057                 XSPI_SetSFPFradEALMode(base, ptrSfpFradConfig->fradConfig[i].exclusiveAccessLock, i);
1058                 if (ptrSfpFradConfig->fradConfig[i].exclusiveAccessLock == kXSPI_ExclusiveAccessLockDisabled)
1059                 {
1060                     *(uint32_t *)fradWord2RegAddr =
1061                         (*(uint32_t *)fradWord2RegAddr &
1062                          (~(XSPI_FRAD0_WORD2_MD0ACP_MASK | XSPI_FRAD0_WORD2_MD1ACP_MASK))) |
1063                         (XSPI_FRAD0_WORD2_MD0ACP(ptrSfpFradConfig->fradConfig[i].tg0MasterAccess) |
1064                          XSPI_FRAD0_WORD2_MD1ACP(ptrSfpFradConfig->fradConfig[i].tg1MasterAccess));
1065                 }
1066                 *(uint32_t *)fradWord3RegAddr =
1067                     ((*(uint32_t *)fradWord3RegAddr) & (~(XSPI_FRAD0_WORD3_LOCK_MASK | XSPI_FRAD0_WORD3_VLD_MASK))) |
1068                     (XSPI_FRAD0_WORD3_VLD(ptrSfpFradConfig->fradConfig[i].assignIsValid) |
1069                      XSPI_FRAD0_WORD3_LOCK(ptrSfpFradConfig->fradConfig[i].descriptorLock));
1070             }
1071         }
1072     }
1073     else
1074     {
1075         base->MGC &= ~(XSPI_MGC_GVLDFRAD_MASK | XSPI_MGC_GVLDMDAD_MASK | XSPI_MGC_GVLD_MASK);
1076     }
1077 }
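/*
 * Usage sketch (not part of the original driver): passing NULL for both descriptor sets
 * disables the SFP MDAD/FRAD checks, which matches the driver's default configuration.
 *
 * code
 * XSPI_UpdateSFPConfig(XSPI0, NULL, NULL);
 * endcode
 */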
1078 
1079 /*!
1080  * brief Get the FRAD latest transaction information.
1081  *
1082  * param[in] base XSPI peripheral base address.
1083  * param[out] ptrInfo Pointer to the variable in type of ref xspi_frad_transaction_info_t to store information.
1084  * param[in] fradId Specify the FRAD ID.
1085  */
1086 void XSPI_GetFradLastTransactionsInfo(XSPI_Type *base, xspi_frad_transaction_info_t *ptrInfo, uint8_t fradId)
1087 {
1088     uint32_t fradWord4RegOffset[] = XSPI_FRAD_WORD4_REG_OFFSET_ARRAY;
1089     uint32_t fradWord5RegOffset[] = XSPI_FRAD_WORD5_REG_OFFSET_ARRAY;
1090 
1091     uint32_t fradWord4RegAddr = (uint32_t)base + fradWord4RegOffset[fradId];
1092     uint32_t fradWord5RegAddr = (uint32_t)base + fradWord5RegOffset[fradId];
1093 
1094     ptrInfo->startAddr = (*(uint32_t *)fradWord4RegAddr);
1095     ptrInfo->masterId =
1096         (uint8_t)(((*(uint32_t *)fradWord5RegAddr) & XSPI_FRAD0_WORD5_CMP_MDID_MASK) >> XSPI_FRAD0_WORD5_CMP_MDID_SHIFT);
1097     ptrInfo->isSecureAccess    = (bool)(((*(uint32_t *)fradWord5RegAddr) & XSPI_FRAD0_WORD5_CMP_SA_MASK) != 0UL);
1098     ptrInfo->isPrivilegeAccess = (bool)(((*(uint32_t *)fradWord5RegAddr) & XSPI_FRAD0_WORD5_CMP_PA_MASK) != 0UL);
1099     ptrInfo->isCompError       = (bool)(((*(uint32_t *)fradWord5RegAddr) & XSPI_FRAD0_WORD5_CMP_ERR_MASK) != 0UL);
1100     ptrInfo->isCompValid       = (bool)(((*(uint32_t *)fradWord5RegAddr) & XSPI_FRAD0_WORD5_CMPVALID_MASK) != 0UL);
1101 }
1102 
1103 /*!
1104  * brief Update SFP arbitration lock timeout counter.
1105  *
1106  * note The SFP arbitration lock timeout counter starts when the page program wait flag is asserted.
1107  *
1108  * param[in] base XSPI peripheral base address.
1109  * param[in] countValue The count value, specify the time in IPS clock cycles.
1110  *
1111  * retval kStatus_XSPI_RegWriteLocked Write operation to the related register is locked.
1112  * retval kStatus_Success Successfully updated the timeout counter.
1113  */
1114 status_t XSPI_UpdateSFPArbitrationLockTimeoutCounter(XSPI_Type *base, uint32_t countValue)
1115 {
1116     if (XSPI_CheckGlobalConfigLocked(base))
1117     {
1118         return kStatus_XSPI_RegWriteLocked;
1119     }
1120 
1121     base->SFP_ARB_TIMEOUT = countValue;
1122 
1123     return kStatus_Success;
1124 }
1125 
1126 /*!
1127  * brief Update the count of SFP access timeout counter.
1128  *
1129  * note The counter starts when an IP access passes the SFP check (if enabled), the request is granted by the XSPI
1130  * arbiter, and XSPI is idle.
1131  *
1132  * note The counter does not start if the IP access is granted while XSPI is not idle.
1133  *
1134  * param base XSPI peripheral base address.
1135  * param countValue The count value, specify the time in IPS clock cycles.
1136  *
1137  * retval kStatus_XSPI_RegWriteLocked Write operation to the related register is locked.
1138  * retval kStatus_Success Successfully updated the timeout counter.
1139  */
1140 status_t XSPI_UpdateIPAccessTimeoutCounter(XSPI_Type *base, uint32_t countValue)
1141 {
1142     if (XSPI_CheckGlobalConfigLocked(base))
1143     {
1144         return kStatus_XSPI_RegWriteLocked;
1145     }
1146 
1147     base->MTO = countValue;
1148 
1149     return kStatus_Success;
1150 }
1151 
1152 /*!
1153  * brief Get MDAD check error reason for specific target group.
1154  *
1155  * param[in] base XSPI peripheral base address.
1156  * param[in] tgId Specify the target group.
1157  *
1158  * return The details of MDAD error reason, in type of ref xspi_mdad_error_reason_t.
1159  */
1160 xspi_mdad_error_reason_t XSPI_GetMdadErrorReason(XSPI_Type *base, xspi_target_group_t tgId)
1161 {
1162     uint8_t tmp8 = 0U;
1163 
1164     if (tgId == kXSPI_TargetGroup0)
1165     {
1166         tmp8 = (uint8_t)((base->TGIPCRS & XSPI_TGIPCRS_ERR_MASK) >> XSPI_TGIPCRS_ERR_SHIFT);
1167     }
1168     else
1169     {
1170         tmp8 = (uint8_t)((base->SUB_REG_MDAM_ARRAY[0].TGIPCRS_SUB & XSPI_TGIPCRS_SUB_ERR_MASK) >>
1171                          XSPI_TGIPCRS_SUB_ERR_SHIFT);
1172     }
1173 
1174     return (xspi_mdad_error_reason_t)tmp8;
1175 }
1176 
1177 /*!
1178  * brief Clear address write status for specific target group.
1179  *
1180  * param[in] base XSPI peripheral base address.
1181  * param[in] tgId Specify the target group to clear address write status.
1182  */
1183 void XSPI_ClearTgAddrWriteStatus(XSPI_Type *base, xspi_target_group_t tgId)
1184 {
1185     uint32_t tgSfarsRegAddr = (uint32_t)base + (uint32_t)s_tgSfarsRegOffset[(uint8_t)(tgId)];
1186 
1187     *((uint32_t *)tgSfarsRegAddr) |= XSPI_TGSFARS_CLR_MASK;
1188 }
1189 
1190 /*!
1191  * brief Get address write status for specific target group.
1192  *
1193  * param[in] base XSPI peripheral base address.
1194  * param[in] tgId Specify the target group.
1195  * param[out] ptrStatus Pointer to the variable in type of ref xspi_tg_add_write_status_t
1196                         to store address write status.
1197  */
1198 void XSPI_GetTgAddrWriteStatus(XSPI_Type *base, xspi_target_group_t tgId, xspi_tg_add_write_status_t *ptrStatus)
1199 {
1200     uint32_t tgSfarsRegAddr = (uint32_t)base + (uint32_t)s_tgSfarsRegOffset[(uint8_t)(tgId)];
1201     uint32_t tmp32          = (*(uint32_t *)tgSfarsRegAddr);
1202 
1203     ptrStatus->managerId       = (uint8_t)(tmp32 & XSPI_TGSFARS_TG_MID_MASK);
1204     ptrStatus->secureWrite     = (bool)((tmp32 & XSPI_TGSFARS_SA_MASK) != 0UL);
1205     ptrStatus->privilegedWrite = (bool)((tmp32 & XSPI_TGSFARS_PA_MASK) != 0UL);
1206 }
1207 
1208 /*!
1209  * brief Unlock IP access arbitration.
1210  *
1211  * param[in] base XSPI peripheral base address.
1212  * param[in] tgId Specify the target group.
1213  */
1214 void XSPI_UnlockIpAccessArbitration(XSPI_Type *base, xspi_target_group_t tgId)
1215 {
1216     uint32_t tgIpcrRegOffset[] = XSPI_TGIPCRS_REG_OFFSET;
1217     uint32_t tgIpcrRegAddr     = (uint32_t)base + tgIpcrRegOffset[(uint8_t)tgId];
1218 
1219     *(uint32_t *)tgIpcrRegAddr |= XSPI_SFP_TG_IPCR_ARB_UNLOCK_MASK;
1220 }
1221 
1222 /*!
1223  * brief Start an IP access (read or write).
1224  *
1225  * param[in] base XSPI peripheral base address.
1226  * param[in] addr Address of the external device to read/write.
1227  * param[in] seqIndex Sequence ID of the pre-programmed LUT.
1228  * param[in] byteSize Size of the data to read/write, in bytes.
1229  * param[in] tgId Specify the target group used to write/read.
1230  * param[in] lockArbitration Lock the arbitration or not.
1231  *
1232  * retval kStatus_XSPI_IpAccessAddrSettingInvalid Wrong address input.
1233  * retval kStatus_XSPI_IpAccessIPCRInvalid Wrong seqIndex or byteSize input.
1234  * retval kStatus_Success Successfully started the IP access.
1235  */
1236 status_t XSPI_StartIpAccess(
1237     XSPI_Type *base, uint32_t addr, uint8_t seqIndex, size_t byteSize, xspi_target_group_t tgId, bool lockArbitration)
1238 {
1239     uint32_t tgSfarsRegAddr     = (uint32_t)base + (uint32_t)s_tgSfarsRegOffset[(uint8_t)tgId];
1240     uint32_t tgIpcrsRegAddr     = (uint32_t)base + (uint32_t)s_tgIpcrsRegOffset[(uint8_t)tgId];
1241     uint32_t sfpTgIpcrRegAddr   = (uint32_t)base + (uint32_t)s_sfpTgIpcrRegOffset[(uint8_t)tgId];
1242     uint32_t sfpTgIpSfarRegAddr = (uint32_t)base + (uint32_t)s_sfpTgIpSfarRegOffset[(uint8_t)tgId];
1243     uint32_t tgMdadRegAddr      = (uint32_t)base + (uint32_t)s_tgMdadRegOffset[(uint8_t)tgId];
1244 
1245     bool mdadEnabled = false;
1246     bool mdadValid   = false;
1247     uint32_t tmp32   = 0UL;
1248 
1249     mdadEnabled = (bool)((base->MGC & XSPI_MGC_GVLDMDAD_MASK) != 0UL);
1250     mdadValid   = (((*(uint32_t *)tgMdadRegAddr) & XSPI_TG0MDAD_VLD_MASK) != 0UL);
1251     /* Wait until the selected target group queue is empty. */
1252     while (((*(uint32_t *)tgSfarsRegAddr) & XSPI_TGSFARS_VLD_MASK) != 0UL)
1253     {
1254     }
1255 
1256     /* Set target address. */
1257     (*(uint32_t *)sfpTgIpSfarRegAddr) = addr;
1258 
1259     if (mdadEnabled && mdadValid)
1260     {
1261         do
1262         {
1263             tmp32 = (*(uint32_t *)tgSfarsRegAddr & (XSPI_TGSFARS_VLD_MASK | XSPI_TGSFARS_ERR_MASK));
1264             if (tmp32 == XSPI_TGSFARS_ERR_MASK)
1265             {
1266                 /* The manager ID or write attributes to set SFAR register is not valid. */
1267                 return kStatus_XSPI_IpAccessAddrSettingInvalid;
1268             }
1269         } while (tmp32 != XSPI_TGSFARS_VLD_MASK);
1270     }
1271 
1272     /* Set transfer size and sequence ID. */
1273     (*(uint32_t *)sfpTgIpcrRegAddr) = XSPI_SFP_TG_IPCR_IDATSZ(byteSize) | XSPI_SFP_TG_IPCR_ARB_LOCK(lockArbitration) |
1274                                       XSPI_SFP_TG_IPCR_SEQID(seqIndex);
1275     if (mdadEnabled && mdadValid)
1276     {
1277         do
1278         {
1279             tmp32 = (*(uint32_t *)tgIpcrsRegAddr & (XSPI_TGIPCRS_ERR_MASK | XSPI_TGIPCRS_VLD_MASK));
1280             if ((tmp32 & XSPI_TGIPCRS_ERR_MASK) != 0UL)
1281             {
1282                 return kStatus_XSPI_IpAccessIPCRInvalid;
1283             }
1284         } while ((tmp32 & XSPI_TGIPCRS_VLD_MASK) == 0UL);
1285     }
1286 
1287     /* Blocking until the IP access is granted. */
1288     while (XSPI_CheckIPAccessGranted(base) == false)
1289     {
1290     }
1291 
1292     return kStatus_Success;
1293 }
1294 
1295 /***************************** IPS Access Control Low-Level Interfaces End ********************************/
1296 
1297 /***************************** IPS Access Control Functional Interfaces Start ********************************/
1298 /*!
1299  * brief Set IP access configurations, including SFP configurations, SFP arbitration lock timeout value, and IP
1300  * access timeout value.
1301  *
1302  * param[in] base XSPI peripheral base address.
1303  * param[in] ptrIpAccessConfig Pointer to the variable which contains Ip access configurations.
1304  *
1305  * retval kStatus_XSPI_RegWriteLocked Write operation to related register is locked.
1306  * retval kStatus_Success Success to set IP access configurations.
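 *
 * note A minimal sketch, assuming MDAD/FRAD configurations named sfpMdadConfig and sfpFradConfig have been
 * prepared elsewhere and that the timeout values below are illustrative only:
 * code
 *  xspi_ip_access_config_t ipAccessConfig;
 *
 *  (void)memset(&ipAccessConfig, 0, sizeof(ipAccessConfig));
 *  ipAccessConfig.ptrSfpMdadConfig               = &sfpMdadConfig;
 *  ipAccessConfig.ptrSfpFradConfig               = &sfpFradConfig;
 *  ipAccessConfig.sfpArbitrationLockTimeoutValue = 0x10000UL;
 *  ipAccessConfig.ipAccessTimeoutValue           = 0x10000UL;
 *  (void)XSPI_SetIpAccessConfig(XSPI0, &ipAccessConfig);
 * endcode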
1307  */
1308 status_t XSPI_SetIpAccessConfig(XSPI_Type *base, xspi_ip_access_config_t *ptrIpAccessConfig)
1309 {
1310     assert(ptrIpAccessConfig != NULL);
1311 
1312     status_t status = kStatus_Success;
1313 
1314     XSPI_ClearIPAccessSeqPointer(base);
1315     XSPI_UpdateSFPConfig(base, ptrIpAccessConfig->ptrSfpMdadConfig, ptrIpAccessConfig->ptrSfpFradConfig);
1316     status = XSPI_UpdateSFPArbitrationLockTimeoutCounter(base, ptrIpAccessConfig->sfpArbitrationLockTimeoutValue);
1317 
1318     if (status == kStatus_Success)
1319     {
1320         status = XSPI_UpdateIPAccessTimeoutCounter(base, ptrIpAccessConfig->ipAccessTimeoutValue);
1321     }
1322 
1323     return status;
1324 }
1325 
1326 /*!
1327  * brief Sends a buffer of data bytes using blocking method.
1328  * note This function blocks via polling until all bytes have been sent.
1329  * param base XSPI peripheral base address
1330  * param buffer The data bytes to send
1331  * param size The number of data bytes to send
1332  * retval kStatus_Success write success without error
1333  * retval kStatus_XSPI_SequenceExecutionTimeout sequence execution timeout
1334  * retval kStatus_XSPI_IpCommandSequenceError IP command sequence error detected
1335  * retval kStatus_XSPI_IpCommandGrantTimeout IP command grant timeout detected
1336  */
1337 status_t XSPI_WriteBlocking(XSPI_Type *base, uint8_t *buffer, size_t size)
1338 {
1339     status_t result = kStatus_Success;
1340     uint32_t i      = 0;
1341 
1342     /* Blocking until TX buffer is unlocked. */
1343     while (XSPI_CheckTxBuffLockOpen(base) == false)
1344     {
1345     }
1346 
1347     base->TBCT = 256UL - ((uint32_t)size / 4UL - 1UL);
1348     /* Send data buffer */
1349     while (0U != size)
1350     {
1351         result = XSPI_CheckAndClearError(base, base->ERRSTAT);
1352 
1353         if (kStatus_Success != result)
1354         {
1355             return result;
1356         }
1357 
1358         /* Write up to watermark level of data into the TX FIFO. */
1359         /* Write word-aligned data into the TX FIFO. */
1360         for (i = 0U; i < (size / 4U); i++)
1361         {
1362             while (1UL == ((base->SR & XSPI_SR_TXFULL_MASK) >> XSPI_SR_TXFULL_SHIFT))
1363             {
1364             }
1365             base->TBDR = *(uint32_t *)buffer;
1366             buffer += 4U;
1367         }
1368 
1369         /* Adjust size by the amount processed. */
1370         size -= 4U * i;
1371 
1372         /* Write word un-aligned data into tx fifo. */
1373         if (0x00U != size)
1374         {
1375             uint32_t tempVal = 0x00U;
1376 
1377             for (uint32_t j = 0U; j < size; j++)
1378             {
1379                 tempVal |= ((uint32_t)*buffer++ << (8U * j));
1380             }
1381 
1382             while (1UL == ((base->SR & XSPI_SR_TXFULL_MASK) >> XSPI_SR_TXFULL_SHIFT))
1383             {
1384             }
1385             base->TBDR = tempVal;
1386         }
1387 
1388         size = 0U;
1389     }
1390 
1391     /*clear TX Buffer Fill Flag*/
1392     base->FR = XSPI_FR_TBFF_MASK;
1393 
1394     while (XSPI_CheckIPAccessAsserted(base))
1395     {
1396     }
1397 
1398     return result;
1399 }
1400 
1401 /*!
1402  * brief Receives a buffer of data bytes using a blocking method.
1403  * note This function blocks via polling until all bytes have been received.
1404  * param base XSPI peripheral base address
1405  * param buffer The buffer to store the received data bytes
1406  * param size The number of data bytes to receive
1407  * retval kStatus_Success read success without error
1408  * retval kStatus_XSPI_SequenceExecutionTimeout sequence execution timeout
1409  * retval kStatus_XSPI_IpCommandSequenceError IP command sequence error detected
1410  * retval kStatus_XSPI_IpCommandGrantTimeout IP command grant timeout detected
1411  */
1412 status_t XSPI_ReadBlocking(XSPI_Type *base, uint8_t *buffer, size_t size)
1413 {
1414     uint32_t rxWatermark  = base->RBCT + 1UL;
1415     status_t status       = kStatus_Success;
1416     uint32_t i            = 0UL;
1417     uint32_t removedCount = 0UL;
1418 
1419     while ((1UL != (base->SR & XSPI_SR_BUSY_MASK)) && (((base->SR & XSPI_SR_IP_ACC_MASK) >> XSPI_SR_IP_ACC_SHIFT) != 0UL))
1420     {
1421     }
1422 
1423     /* Loop until Rx buffer watermark exceeded status asserted. */
1424     while ((base->SR & XSPI_SR_RXWE_MASK) == 0UL)
1425     {
1426         if ((base->ERRSTAT & XSPI_ERRSTAT_TO_ERR_MASK) != 0UL)
1427         {
1428             base->ERRSTAT = XSPI_ERRSTAT_TO_ERR_MASK;
1429             return kStatus_Timeout;
1430         }
1431     }
1432 
1433     uint32_t adjustedSize = size + (4UL - size % 4UL) % 4UL;
1434 
1435     if (XSPI_GetRxBufferAvailableBytesCount(base) != adjustedSize)
1436     {
1437         return kStatus_XSPI_RxBufferEntriesCountError;
1438     }
1439     /* Read data into buffer. */
1440     while (0UL != size)
1441     {
1442         status = XSPI_CheckAndClearError(base, base->ERRSTAT);
1443 
1444         if (kStatus_Success != status)
1445         {
1446             break;
1447         }
1448 
1449         /* Read word aligned data from rx fifo. */
1450         if (size >= 4UL * rxWatermark)
1451         {
1452             removedCount = XSPI_GetRxBufferRemovedBytesCount(base);
1453             for (i = 0UL; i < rxWatermark; i++)
1454             {
1455                 *(uint32_t *)buffer = base->RBDR[i];
1456                 buffer += 4UL;
1457             }
1458 
1459             /* RX buffer POP, trigger RX pop event. */
1460             XSPI_TriggerRxBufferPopEvent(base);
1461             while ((XSPI_GetRxBufferRemovedBytesCount(base) - removedCount) != (rxWatermark * 4UL))
1462             {
1463             }
1464             size = size - 4UL * rxWatermark;
1465         }
1466         else
1467         {
1468             for (i = 0UL; i < (size / 4UL); i++)
1469             {
1470                 *(uint32_t *)buffer = base->RBDR[i];
1471                 buffer += 4UL;
1472             }
1473 
1474             /* Adjust size by the amount processed. */
1475             size -= 4UL * i;
1476 
1477             /* Read word un-aligned data from rx fifo. */
1478             if (0UL != size)
1479             {
1480                 uint32_t tempVal = base->RBDR[i];
1481 
1482                 for (i = 0U; i < size; i++)
1483                 {
1484                     *buffer++ = ((uint8_t)(tempVal >> (8U * i)) & 0xFFU);
1485                 }
1486             }
1487 
1488             size = 0UL;
1489         }
1490     }
1491 
1492     /* Clear Rx buffer after all entries are read out. */
1493     XSPI_ClearRxBuffer(base);
1494 
1495     return status;
1496 }
1497 
1498 /*!
1499  * brief Execute command to transfer a buffer data bytes using a blocking method.
1500  * param base XSPI peripheral base address
1501  * param xfer pointer to the transfer structure.
1502  * retval kStatus_Success command transfer success without error
1503  * retval kStatus_XSPI_SequenceExecutionTimeout sequence execution timeout
1504  * retval kStatus_XSPI_IpCommandSequenceError IP command sequence error detected
1505  * retval kStatus_XSPI_IpCommandGrantTimeout IP command grant timeout detected
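 *
 * note A minimal read sketch, assuming a read sequence at LUT index 0 and a target group enumerator named
 * kXSPI_TargetGroup0 (placeholder name):
 * code
 *  uint32_t readBuffer[64];
 *  xspi_transfer_t xfer;
 *
 *  xfer.deviceAddress   = 0x0UL;
 *  xfer.cmdType         = kXSPI_Read;
 *  xfer.seqIndex        = 0U;
 *  xfer.data            = readBuffer;
 *  xfer.dataSize        = sizeof(readBuffer);
 *  xfer.targetGroup     = kXSPI_TargetGroup0;
 *  xfer.lockArbitration = false;
 *
 *  if (XSPI_TransferBlocking(XSPI0, &xfer) != kStatus_Success)
 *  {
 *      // Handle the error here.
 *  }
 * endcode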
1506  */
1507 status_t XSPI_TransferBlocking(XSPI_Type *base, xspi_transfer_t *xfer)
1508 {
1509     status_t status    = kStatus_Success;
1510     uint32_t dataSize  = xfer->dataSize;
1511     uint8_t *ptrBuffer = (uint8_t *)xfer->data;
1512 
1513     if ((xfer->cmdType == kXSPI_Write) || (xfer->cmdType == kXSPI_Config))
1514     {
1515         status = XSPI_StartIpAccess(base, xfer->deviceAddress, xfer->seqIndex, dataSize, xfer->targetGroup,
1516                                     xfer->lockArbitration);
1517         if (status != kStatus_Success)
1518         {
1519             return status;
1520         }
1521         /* Clear TX buffer pointer. */
1522         XSPI_ClearTxBuffer(base);
1523 
1524         status = XSPI_WriteBlocking(base, ptrBuffer, xfer->dataSize);
1525     }
1526     else if (xfer->cmdType == kXSPI_Read)
1527     {
1528         uint32_t rxBufferWaterMark = 0UL;
1529         uint32_t transferSize      = 0UL;
1530         while (dataSize > 0UL)
1531         {
1532             XSPI_ClearRxBuffer(base);
1533 
1534             /* calculate watermark and transfer size based on data size. */
1535             if (dataSize >= XSPI_IP_RX_BUFFER_SIZE)
1536             {
1537                 /* In case of data size bigger than the RX buffer size, the big request should be broken into
1538                 smaller sub-requests, and the transfer size of each sub-request is equal to the buffer size. */
1539                 rxBufferWaterMark = XSPI_IP_RX_BUFFER_SIZE >> 1UL;
1540                 transferSize      = XSPI_IP_RX_BUFFER_SIZE;
1541             }
1542             else if (dataSize > (XSPI_IP_RX_BUFFER_SIZE >> 1UL))
1543             {
1544                 /* In case of data size bigger than the maximum watermark value, watermark is set to the maximum
1545                 value, and transfer size is the data size. */
1546                 rxBufferWaterMark = XSPI_IP_RX_BUFFER_SIZE >> 1UL;
1547                 transferSize      = dataSize;
1548             }
1549             else if (dataSize % 4UL == 0UL)
1550             {
1551                 /* In case of data size less than the maximum watermark value and aligned to 4 bytes, watermark is
1552                 set to the data size, and transfer size is also the data size. */
1553                 rxBufferWaterMark = dataSize;
1554                 transferSize      = dataSize;
1555             }
1556             else
1557             {
1558                 /* In case of data size not 4-byte aligned, watermark is set to the closest value, and transfer
1559                 size is the data size. */
1560                 rxBufferWaterMark = (dataSize / 4UL) << 2UL;
1561                 if (rxBufferWaterMark == 0UL)
1562                 {
1563                     rxBufferWaterMark = 4UL;
1564                 }
1565                 transferSize = dataSize;
1566             }
1567             (void)XSPI_UpdateRxBufferWaterMark(base, rxBufferWaterMark);
1568 
1569             status =
1570                 XSPI_StartIpAccess(base, xfer->deviceAddress, xfer->seqIndex, transferSize, xfer->targetGroup, false);
1571             if (status != kStatus_Success)
1572             {
1573                 return status;
1574             }
1575             status = XSPI_ReadBlocking(base, ptrBuffer, transferSize);
1576             if (status == kStatus_Timeout)
1577             {
1578                 XSPI_ResetTgQueue(base);
1579                 XSPI_ResetSfmAndAhbDomain(base);
1580                 break;
1581             }
1582             dataSize -= transferSize;
1583             if (dataSize != 0UL)
1584             {
1585                 ptrBuffer += transferSize;
1586             }
1587         }
1588     }
1589     else
1590     {
1591         /* Set command. */
1592         status = XSPI_StartIpAccess(base, xfer->deviceAddress, xfer->seqIndex, 0UL, xfer->targetGroup,
1593                                     xfer->lockArbitration);
1594         if (status != kStatus_Success)
1595         {
1596             return status;
1597         }
1598     }
1599 
1600     /* Wait for bus to be idle before changing flash configuration. */
1601     while (!XSPI_GetBusIdleStatus(base))
1602     {
1603     }
1604 
1605     if (xfer->cmdType == kXSPI_Command)
1606     {
1607         status = XSPI_CheckAndClearError(base, base->ERRSTAT);
1608         while (XSPI_CheckIPAccessAsserted(base))
1609         {
1610         }
1611     }
1612 
1613     return status;
1614 }
1615 
1616 /*!
1617  * brief Initializes the XSPI handle which is used in transactional functions.
1618  *
1619  * param base XSPI peripheral base address.
1620  * param handle pointer to xspi_handle_t structure to store the transfer state.
1621  * param callback pointer to user callback function.
1622  * param userData user parameter passed to the callback function.
1623  */
1624 void XSPI_TransferCreateHandle(XSPI_Type *base,
1625                                xspi_handle_t *handle,
1626                                xspi_transfer_callback_t callback,
1627                                void *userData)
1628 {
1629     assert(NULL != handle);
1630 
1631     uint32_t instance = XSPI_GetInstance(base);
1632 
1633     /* Zero handle. */
1634     (void)memset(handle, 0, sizeof(*handle));
1635 
1636     /* Set callback and userData. */
1637     handle->completionCallback = callback;
1638     handle->userData           = userData;
1639 
1640     /* Enable NVIC interrupt. */
1641     (void)EnableIRQ(s_xspiIrqs[instance]);
1642 }
1643 
1644 /*!
1645  * brief Performs an interrupt non-blocking transfer on the XSPI bus.
1646  *
1647  * note Calling the API returns immediately after the transfer initiates. The user needs
1648  * to call XSPI_TransferGetCount to poll the transfer progress; once it reports
1649  * kStatus_NoTransferInProgress, the transfer is finished. For read transfers (cmdType kXSPI_Read),
1650  * the dataSize should be a multiple of the RX watermark level, or
1651  * XSPI can not read data properly.
1652  *
1653  * param base XSPI peripheral base address.
1654  * param handle pointer to xspi_handle_t structure which stores the transfer state.
1655  * param xfer pointer to xspi_transfer_t structure.
1656  * retval kStatus_Success Successfully start the data transmission.
1657  * retval kStatus_XSPI_Busy Previous transmission still not finished.
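 *
 * note A minimal sketch, assuming a read sequence at LUT index 0, a target group enumerator named
 * kXSPI_TargetGroup0 (placeholder name), and no completion callback:
 * code
 *  static xspi_handle_t s_handle;
 *  uint32_t readBuffer[64];
 *  xspi_transfer_t xfer;
 *  size_t transferredBytes = 0U;
 *
 *  XSPI_TransferCreateHandle(XSPI0, &s_handle, NULL, NULL);
 *
 *  xfer.deviceAddress   = 0x0UL;
 *  xfer.cmdType         = kXSPI_Read;
 *  xfer.seqIndex        = 0U;
 *  xfer.data            = readBuffer;
 *  xfer.dataSize        = sizeof(readBuffer);
 *  xfer.targetGroup     = kXSPI_TargetGroup0;
 *  xfer.lockArbitration = false;
 *
 *  if (XSPI_TransferNonBlocking(XSPI0, &s_handle, &xfer) == kStatus_Success)
 *  {
 *      while (XSPI_TransferGetCount(XSPI0, &s_handle, &transferredBytes) != kStatus_NoTransferInProgress)
 *      {
 *      }
 *  }
 * endcode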
1658  */
1659 status_t XSPI_TransferNonBlocking(XSPI_Type *base, xspi_handle_t *handle, xspi_transfer_t *xfer)
1660 {
1662     status_t status = kStatus_Success;
1663 
1664     assert(NULL != handle);
1665     assert(NULL != xfer);
1666 
1667     /* Check if the XSPI bus is idle - if not return busy status. */
1668     if (handle->state != (uint32_t)kXSPI_Idle)
1669     {
1670         status = kStatus_XSPI_Busy;
1671     }
1672     else
1673     {
1674         handle->data              = (uint8_t *)xfer->data;
1675         handle->dataSize          = xfer->dataSize;
1676         handle->transferTotalSize = xfer->dataSize;
1677         handle->state = (xfer->cmdType == kXSPI_Read) ? (uint32_t)kXSPI_BusyRead : (uint32_t)kXSPI_BusyWrite;
1678 
1679         status = XSPI_StartIpAccess(base, xfer->deviceAddress, xfer->seqIndex, xfer->dataSize, xfer->targetGroup,
1680                                     xfer->lockArbitration);
1681         if (status != kStatus_Success)
1682         {
1683             return status;
1684         }
1685 
1686         if (handle->state == (uint32_t)kXSPI_BusyRead)
1687         {
1688             XSPI_ClearRxBuffer(base);
1689             XSPI_EnableInterrupts(base, (uint32_t)kXSPI_RxBufferOverflowFlag | (uint32_t)kXSPI_RxBufferDrainFlag |
1690                                             (uint32_t)kXSPI_IpCmdtriggerErrorFlag |
1691                                             (uint32_t)kXSPI_IllegalInstructionErrorFlag);
1692         }
1693         else
1694         {
1695             /* Clear TX buffer pointer. */
1696             XSPI_ClearTxBuffer(base);
1697             XSPI_EnableInterrupts(base, (uint32_t)kXSPI_TxBufferFillFlag | (uint32_t)kXSPI_TxBufferUnderrunFlag |
1698                                             (uint32_t)kXSPI_IpCmdtriggerErrorFlag |
1699                                             (uint32_t)kXSPI_IllegalInstructionErrorFlag);
1700         }
1701     }
1702 
1703     return status;
1704 }
1705 
1706 /*!
1707  * brief Gets the master transfer status during an interrupt non-blocking transfer.
1708  *
1709  * param base XSPI peripheral base address.
1710  * param handle pointer to xspi_handle_t structure which stores the transfer state.
1711  * param count Number of bytes transferred so far by the non-blocking transaction.
1712  * retval kStatus_NoTransferInProgress No transfer currently in progress.
1713  * retval kStatus_Success Successfully return the count.
1714  */
1715 status_t XSPI_TransferGetCount(XSPI_Type *base, xspi_handle_t *handle, size_t *count)
1716 {
1717     assert(NULL != handle);
1718 
1719     status_t status = kStatus_Success;
1720 
1721     if (handle->state == (uint32_t)kXSPI_Idle)
1722     {
1723         status = kStatus_NoTransferInProgress;
1724     }
1725     else
1726     {
1727         *count = handle->transferTotalSize - handle->dataSize;
1728     }
1729 
1730     return status;
1731 }
1732 
1733 /*!
1734  * brief Aborts an interrupt non-blocking transfer early.
1735  *
1736  * note This API can be called at any time after an interrupt non-blocking transfer has been
1737  * initiated, to abort the transfer early.
1738  *
1739  * param base XSPI peripheral base address.
1740  * param handle pointer to xspi_handle_t structure which stores the transfer state
1741  */
1742 void XSPI_TransferAbort(XSPI_Type *base, xspi_handle_t *handle)
1743 {
1744     assert(NULL != handle);
1745 
1746     XSPI_DisableInterrupts(base, (uint32_t)kIrqFlags);
1747     handle->state = (uint32_t)kXSPI_Idle;
1748 }
1749 
1750 /***************************** IPS Access Control Functional Interfaces End ********************************/
1751 
1752 /***************************** AHB Access Control Low-Level Interfaces Start ********************************/
1753 /*!
1754  * brief Set AHB transaction(read/write) boundary.
1755  *
1756  * note For AHB write, XSPI keeps track of the start address and then compares it with the address of each subsequent
1757  * transaction received on the AHB bus; when the input alignment is reached, XSPI negates AHB HREADY to block new
1758  * accesses. This signal is kept low until all the previously received write data is written to external memory and
1759  * chip select is de-asserted. After that it allows the next AHB write data to be received by driving HREADY high, thus
1760  * ensuring that the transaction is split at the address boundary on the external memory.
1761  *
1762  * note For AHB read, XSPI checks the start address and end address to see if it is crossing the address boundary
1763  * specified by input alignment. If the transaction crosses the address boundary, then it reduces the transaction size
1764  * such that the data pre-fetch is stopped before the address boundary. Now if the AHB master is reading the data
1765  * sequentially it will get buffer hits up to the input alignment boundary. When it reaches the next address boundary
1766  * it will get a buffer miss and a new data pre-fetch will be launched towards the external memory device.
1767  *
1768  * param[in] base XSPI peripheral base address.
1769  * param[in] alignment Specify the AHB alignment, in type of ref xspi_ahb_alignment_t.
1770  *
1771  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
1772  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
1773  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
1774  * retval kStatus_Success Success to set AHB access boundary.
1775  */
1776 status_t XSPI_SetAhbAccessBoundary(XSPI_Type *base, xspi_ahb_alignment_t alignment)
1777 {
1778 #if defined(XSPI_BFGENCR_ALIGN_MASK)
1779     if (XSPI_CheckAhbReadAccessAsserted(base))
1780     {
1781         return kStatus_XSPI_AhbReadAccessAsserted;
1782     }
1783 
1784     if (XSPI_CheckAhbWriteAccessAsserted(base))
1785     {
1786         return kStatus_XSPI_AhbWriteAccessAsserted;
1787     }
1788 
1789     if (XSPI_CheckGlobalConfigLocked(base))
1790     {
1791         return kStatus_XSPI_RegWriteLocked;
1792     }
1793 
1794     base->BFGENCR = ((base->BFGENCR & ~(XSPI_BFGENCR_ALIGN_MASK)) | XSPI_BFGENCR_ALIGN(alignment));
1795 #endif /* defined(XSPI_BFGENCR_ALIGN_MASK) */
1796 
1797     return kStatus_Success;
1798 }
1799 /*!
1800  * brief Set AHB read sequence Id.
1801  *
1802  * param[in] base XSPI peripheral base address.
1803  * param[in] seqId Specify the sequence Id in LUT used for AHB read.
1804  *
1805  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
1806  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
1807  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
1808  * retval kStatus_Success Success to set AHB read sequence Id.
1809  */
1810 status_t XSPI_SetAhbReadDataSeqId(XSPI_Type *base, uint8_t seqId)
1811 {
1812     if (XSPI_CheckAhbReadAccessAsserted(base))
1813     {
1814         return kStatus_XSPI_AhbReadAccessAsserted;
1815     }
1816 
1817     if (XSPI_CheckAhbWriteAccessAsserted(base))
1818     {
1819         return kStatus_XSPI_AhbWriteAccessAsserted;
1820     }
1821 
1822     if (XSPI_CheckGlobalConfigLocked(base))
1823     {
1824         return kStatus_XSPI_RegWriteLocked;
1825     }
1826 
1827     base->BFGENCR = ((base->BFGENCR & (~XSPI_BFGENCR_SEQID_MASK)) | XSPI_BFGENCR_SEQID(seqId));
1828 
1829     return kStatus_Success;
1830 }
1831 /*!
1832  * brief Set AHB write sequence Id.
1833  *
1834  * param[in] base XSPI peripheral base address.
1835  * param[in] seqId Specify the sequence Id in LUT used for AHB write.
1836  *
1837  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
1838  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
1839  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
1840  * retval kStatus_Success Success to set AHB write sequence Id.
1841  */
1842 status_t XSPI_SetAhbWriteDataSeqId(XSPI_Type *base, uint8_t seqId)
1843 {
1844     if (XSPI_CheckAhbReadAccessAsserted(base))
1845     {
1846         return kStatus_XSPI_AhbReadAccessAsserted;
1847     }
1848 
1849     if (XSPI_CheckAhbWriteAccessAsserted(base))
1850     {
1851         return kStatus_XSPI_AhbWriteAccessAsserted;
1852     }
1853 
1854     if (XSPI_CheckGlobalConfigLocked(base))
1855     {
1856         return kStatus_XSPI_RegWriteLocked;
1857     }
1858 
1859     base->BFGENCR = ((base->BFGENCR & (~XSPI_BFGENCR_SEQID_WR_MASK)) | XSPI_BFGENCR_SEQID_WR(seqId));
1860 
1861     return kStatus_Success;
1862 }
1863 /*!
1864  * brief Set AHB buffer configurations.
1865  *
1866  * param[in] base XSPI peripheral base address.
1867  * param[in] ptrBuffer0Config Pointer to the variable which contains buffer0 configurations.
1868  * param[in] ptrBuffer1Config Pointer to the variable which contains buffer1 configurations.
1869  * param[in] ptrBuffer2Config Pointer to the variable which contains buffer2 configurations.
1870  * param[in] ptrBuffer3Config Pointer to the variable which contains buffer3 configurations.
1871  *
1872  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted.
1873  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted.
1874  * retval kStatus_XSPI_AhbSubBufferFactorError Fail due to an invalid combination of sub-buffer division factors.
 * retval kStatus_Success Success to set AHB buffer configurations.
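 *
 * note A minimal sketch that splits the 512-entry buffer space evenly and leaves sub-division disabled
 * (the master IDs below are illustrative only):
 * code
 *  xspi_ahbBuffer_config_t bufferConfig[4];
 *
 *  (void)memset(bufferConfig, 0, sizeof(bufferConfig));
 *  for (uint8_t i = 0U; i < 4U; i++)
 *  {
 *      bufferConfig[i].masterId   = i;
 *      bufferConfig[i].bufferSize = 128U;
 *  }
 *  bufferConfig[3].enaPri.enableAllMaster = true;
 *
 *  (void)XSPI_SetAhbBufferConfig(XSPI0, &bufferConfig[0], &bufferConfig[1], &bufferConfig[2], &bufferConfig[3]);
 * endcode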
1875  */
1876 status_t XSPI_SetAhbBufferConfig(XSPI_Type *base,
1877                                  xspi_ahbBuffer_config_t *ptrBuffer0Config,
1878                                  xspi_ahbBuffer_config_t *ptrBuffer1Config,
1879                                  xspi_ahbBuffer_config_t *ptrBuffer2Config,
1880                                  xspi_ahbBuffer_config_t *ptrBuffer3Config)
1881 {
1882     assert(ptrBuffer0Config != NULL);
1883     assert(ptrBuffer1Config != NULL);
1884     assert(ptrBuffer2Config != NULL);
1885     assert(ptrBuffer3Config != NULL);
1886 
1887     assert((ptrBuffer0Config->bufferSize + ptrBuffer1Config->bufferSize + ptrBuffer2Config->bufferSize +
1888             ptrBuffer3Config->bufferSize) == 512U);
1889 
1890     xspi_ahbBuffer_config_t *ahbBufferConfigs[4];
1891     uint32_t subBufferStartAddr; /* Specify the upper 16-bits of the start address range. */
1892     uint32_t subBufferEndAddr;   /* Specify the upper 16-bits of the end address range. */
1893     xspi_ahbBuffer_sub_buffer_config_t *curSubBufferConfig = NULL;
1894 
1895     ahbBufferConfigs[0] = ptrBuffer0Config;
1896     ahbBufferConfigs[1] = ptrBuffer1Config;
1897     ahbBufferConfigs[2] = ptrBuffer2Config;
1898     ahbBufferConfigs[3] = ptrBuffer3Config;
1899     if (XSPI_CheckAhbReadAccessAsserted(base))
1900     {
1901         return kStatus_XSPI_AhbReadAccessAsserted;
1902     }
1903 
1904     if (XSPI_CheckAhbWriteAccessAsserted(base))
1905     {
1906         return kStatus_XSPI_AhbWriteAccessAsserted;
1907     }
1908 
1909     (void)XSPI_UpdateAhbBufferSize(base, ptrBuffer0Config->bufferSize, ptrBuffer1Config->bufferSize,
1910                                    ptrBuffer2Config->bufferSize, ptrBuffer3Config->bufferSize);
1911 
1912     for (uint8_t i = 0U; i < XSPI_BUFCR_COUNT; i++)
1913     {
1914         /* The buffer size is set as the transfer size, because on a buffer miss the
1915         XSPI clears the AHB buffer and fetches this much data from the external device.
1916         If the transfer size were less than the buffer size, the remaining space would never be used. */
1917         base->BUFCR[i] =
1918             XSPI_BUFCR_MSTRID(ahbBufferConfigs[i]->masterId) | XSPI_BUFCR_ADATSZ(ahbBufferConfigs[i]->bufferSize);
1919 
1920         /* The same bit field represents different features for different AHB buffers. */
1921         if ((i == 0U) || (i == 1U))
1922         {
1923             base->BUFCR[i] |= XSPI_BUFCR_HP_EN(ahbBufferConfigs[i]->enaPri.enablePriority);
1924         }
1925 
1926         if (i == 3U)
1927         {
1928             base->BUFCR[i] |= XSPI_BUFCR_ALLMST(ahbBufferConfigs[i]->enaPri.enableAllMaster);
1929         }
1930 
1931         if ((ahbBufferConfigs[i]->ptrSubBuffer0Config == NULL) && (ahbBufferConfigs[i]->ptrSubBuffer1Config == NULL) &&
1932             (ahbBufferConfigs[i]->ptrSubBuffer2Config == NULL) && (ahbBufferConfigs[i]->ptrSubBuffer3Config == NULL))
1933         {
1934             base->BUFCR[i] &= ~XSPI_BUFCR_SUB_DIV_EN_MASK;
1935         }
1936         else
1937         {
1938             if (((1U << (4U - (uint8_t)(ahbBufferConfigs[i]->ptrSubBuffer0Config->divFactor))) +
1939                  (1U << (4U - (uint8_t)(ahbBufferConfigs[i]->ptrSubBuffer1Config->divFactor))) +
1940                  (1U << (4U - (uint8_t)(ahbBufferConfigs[i]->ptrSubBuffer2Config->divFactor)))) > 16U)
1941             {
1942                 return kStatus_XSPI_AhbSubBufferFactorError;
1943             }
1944 
1945             base->BUFCR[i] |= XSPI_BUFCR_SUB_DIV_EN_MASK |
1946                               XSPI_BUFCR_SUBBUF0_DIV(ahbBufferConfigs[i]->ptrSubBuffer0Config->divFactor) |
1947                               XSPI_BUFCR_SUBBUF1_DIV(ahbBufferConfigs[i]->ptrSubBuffer1Config->divFactor) |
1948                               XSPI_BUFCR_SUBBUF2_DIV(ahbBufferConfigs[i]->ptrSubBuffer2Config->divFactor);
1949 
1950             /* Set AHB buffer sub buffer start and end address range. */
1951             for (uint8_t j = 0U; j < XSPI_BUF_ADDR_RANGE_COUNT2; j++)
1952             {
1953                 curSubBufferConfig =
1954                     (xspi_ahbBuffer_sub_buffer_config_t *)(ahbBufferConfigs[i]->ptrSubBuffer0Config) + j;
1955                 subBufferStartAddr = ((curSubBufferConfig->startAddr) & 0xFFFF0000UL) >> 16UL;
1956                 subBufferEndAddr   = ((curSubBufferConfig->startAddr) & 0xFFFF0000UL) >> 16UL;
1957 
1958                 base->BUF_ADDR_RANGE[i][j] =
1959                     XSPI_BUF_ADDR_RANGE_STARTADR(subBufferStartAddr) | XSPI_BUF_ADDR_RANGE_ENDADR(subBufferEndAddr);
1960 
1961                 if (curSubBufferConfig->enableAhbMonitor)
1962                 {
1963                     /* Enable AHB monitor to current sub division. */
1964                     XSPI_EnableAhbBufferPerfMonitor(base, i, j);
1965                 }
1966             }
1967         }
1968     }
1969     return kStatus_Success;
1970 }
1971 
1972 /*!
1973  * brief Set AHB transaction(read/write) split size.
1974  *
1975  * note For AHB write, if the split feature is enabled (ahbSplitSize is not set to kXSPI_AhbSplitSizeDisabled), XSPI
1976  * splits a single AHB write burst into smaller bursts on the external device side.
1977  *
1978  * note For AHB read, if the split feature is enabled, hardware realigns the prefetch size to the split size and in
1979  * that way splits a single read burst into smaller bursts.
1980  *
1981  * param[in] base XSPI peripheral base address.
1982  * param[in] ahbSplitSize Specify the AHB split size.
1983  *
1984  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
1985  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
1986  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
1987  * retval kStatus_Success Success to set AHB access split size.
1988  */
1989 status_t XSPI_SetAhbAccessSplitSize(XSPI_Type *base, xspi_ahb_split_size_t ahbSplitSize)
1990 {
1991     if (XSPI_CheckAhbReadAccessAsserted(base))
1992     {
1993         return kStatus_XSPI_AhbReadAccessAsserted;
1994     }
1995 
1996     if (XSPI_CheckAhbWriteAccessAsserted(base))
1997     {
1998         return kStatus_XSPI_AhbWriteAccessAsserted;
1999     }
2000 
2001     if (XSPI_CheckGlobalConfigLocked(base))
2002     {
2003         return kStatus_XSPI_RegWriteLocked;
2004     }
2005 
2006     if (ahbSplitSize == kXSPI_AhbSplitSizeDisabled)
2007     {
2008         base->BFGENCR &= ~XSPI_BFGENCR_SPLITEN_MASK;
2009     }
2010     else
2011     {
2012         base->BFGENCR |= XSPI_BFGENCR_SPLITEN_MASK;
2013         base->BFGENCR = (base->BFGENCR & (~XSPI_BFGENCR_AHBSSIZE_MASK)) | XSPI_BFGENCR_AHBSSIZE(ahbSplitSize);
2014     }
2015 
2016     return kStatus_Success;
2017 }
2018 
2019 /*!
2020  * brief Specify how long XSPI can hold HREADY low while waiting for the response to an AHB transfer.
2021  *
2022  * note If the specified timeout value expires, an AHB illegal transaction error will be triggered.
2023  *
2024  * param[in] base XSPI peripheral base address.
2025  * param[in] timeoutValue In multiples of 50000 AHB clock cycles; the default value is 3,
2026  *                      which means waiting for 150000 AHB clock cycles.
2027  *
2028  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2029  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2030  * retval kStatus_Success Success to set AHB HREADY timeout value.
2031  */
2032 status_t XSPI_UpdateAhbHreadyTimeOutValue(XSPI_Type *base, uint16_t timeoutValue)
2033 {
2034     if (XSPI_CheckAhbReadAccessAsserted(base))
2035     {
2036         return kStatus_XSPI_AhbReadAccessAsserted;
2037     }
2038 
2039     if (XSPI_CheckAhbWriteAccessAsserted(base))
2040     {
2041         return kStatus_XSPI_AhbWriteAccessAsserted;
2042     }
2043 
2044     base->AHRDYTO = XSPI_AHRDYTO_HREADY_TO(timeoutValue);
2045 
2046     return kStatus_Success;
2047 }
2048 
2049 /*!
2050  * brief Pre-define the error payload which will be provided on the read data bus when the HREADY timeout counter expires.
2051  *
2052  * note If the incoming AHB read request master is not mapped to any of the AHB buffers, the XSPI will keep the AHB
2053  * HREADY signal low and will provide this pre-defined error payload on the AHB read data bus when the HREADY timeout
2054  * counter expires.
2055  *
2056  * param[in] base XSPI peripheral base address.
2057  * param[in] highPayload High 32bits payload to set.
2058  * param[in] lowPayload Low 32bits payload to set.
2059 
2060  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2061  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2062  * retval kStatus_Success Success to set AHB error payload.
2063  */
2064 status_t XSPI_SetAhbErrorPayload(XSPI_Type *base, uint32_t highPayload, uint32_t lowPayload)
2065 {
2066     if (XSPI_CheckAhbReadAccessAsserted(base))
2067     {
2068         return kStatus_XSPI_AhbReadAccessAsserted;
2069     }
2070 
2071     if (XSPI_CheckAhbWriteAccessAsserted(base))
2072     {
2073         return kStatus_XSPI_AhbWriteAccessAsserted;
2074     }
2075 
2076     base->AHB_ERR_PAYLOAD_HI = highPayload;
2077     base->AHB_ERR_PAYLOAD_LO = lowPayload;
2078 
2079     return kStatus_Success;
2080 }
2081 
2082 /*!
2083  * brief Return AHB read error information.
2084  *
2085  * param[in] base XSPI peripheral base address.
2086  *
2087  * return Latest AHB read error information, in type of xspi_ahb_read_error_info_t.
2088  */
2089 xspi_ahb_read_error_info_t XSPI_ReturnAhbReadErrorInfo(XSPI_Type *base)
2090 {
2091     xspi_ahb_read_error_info_t errorInfo;
2092 
2093     errorInfo.errorAddr   = base->AHB_RD_ERR_ADDR;
2094     errorInfo.errMasterId = (uint8_t)(base->AHB_RD_ERR_MID & XSPI_AHB_RD_ERR_MID_REMID_MASK);
2095 
2096     return errorInfo;
2097 }
2098 
2099 /*!
2100  * brief Get Ahb request suspend information if priority mechanism is enabled.
2101  *
2102  * param[in] base XSPI peripheral base address.
2103  * param[out] ptrSuspendInfo Contains the latest suspend info; a field reads as all ones (for example 0xFF) if it is
2104  * invalid in the current suspend state.
2105  */
2106 void XSPI_GetAhbRequestSuspendInfo(XSPI_Type *base, xspi_ahb_request_suspend_info_t *ptrSuspendInfo)
2107 {
2108     uint32_t tmp32 = 0UL;
2109 
2110     tmp32 = base->SPNDST;
2111 
2112     ptrSuspendInfo->state       = (xspi_ahb_request_suspend_state_t)(uint8_t)(tmp32 & XSPI_SPNDST_STATE_MASK);
2113     ptrSuspendInfo->subBufferId = 0xFFU;
2114     ptrSuspendInfo->ahbBufferId = 0xFFU;
2115     ptrSuspendInfo->dataLeft    = 0xFFFFU;
2116     ptrSuspendInfo->address     = 0xFFFFFFFFUL;
2117 
2118     if ((ptrSuspendInfo->state == kXSPI_AhbRequestSuspended) || (ptrSuspendInfo->state == kXSPI_AhbReqestResumed))
2119     {
2120         ptrSuspendInfo->subBufferId = (uint8_t)((tmp32 & XSPI_SPNDST_SPDSBUF_MASK) >> XSPI_SPNDST_SPDSBUF_SHIFT);
2121         ptrSuspendInfo->ahbBufferId = (uint8_t)((tmp32 & XSPI_SPNDST_SPDBUF_MASK) >> XSPI_SPNDST_SPDBUF_SHIFT);
2122         ptrSuspendInfo->dataLeft    = (uint16_t)((tmp32 & XSPI_SPNDST_DATLFT_MASK) >> XSPI_SPNDST_DATLFT_SHIFT);
2123 
2124         if (ptrSuspendInfo->state == kXSPI_AhbRequestSuspended)
2125         {
2126             ptrSuspendInfo->address = base->SPNDST_ADDR;
2127         }
2128     }
2129 }
2130 
2131 /*!
2132  * brief Enable/disable the clearing of AHB read prefetch buffers when the same flash address is written by an
2133  *   AHB or IP command.
2134  *
2135  * param[in] base XSPI peripheral base address.
2136  * param[in] enable Used to enable/disable write flush.
2137  *
2138  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted.
2139  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted.
2140  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
 * retval kStatus_Success Successfully enabled/disabled AHB buffer write flush.
2141  */
2142 status_t XSPI_EnableAhbBufferWriteFlush(XSPI_Type *base, bool enable)
2143 {
2144     if (XSPI_CheckAhbReadAccessAsserted(base))
2145     {
2146         return kStatus_XSPI_AhbReadAccessAsserted;
2147     }
2148 
2149     if (XSPI_CheckAhbWriteAccessAsserted(base))
2150     {
2151         return kStatus_XSPI_AhbWriteAccessAsserted;
2152     }
2153 
2154     if (XSPI_CheckGlobalConfigLocked(base))
2155     {
2156         return kStatus_XSPI_RegWriteLocked;
2157     }
2158 
2159     if (enable)
2160     {
2161         base->BFGENCR |= XSPI_BFGENCR_WR_FLUSH_EN_MASK;
2162     }
2163     else
2164     {
2165         base->BFGENCR &= ~XSPI_BFGENCR_WR_FLUSH_EN_MASK;
2166     }
2167 
2168     return kStatus_Success;
2169 }
2170 
2171 /*!
2172  * brief Block access(write and read) after the AHB write operation.
2173  *
2174  * note If the external flash supports RWW (read-while-write), read should not be blocked:
2175  * code
2176  *  XSPI_BlockAccessAfterAhbWrite(XSPI0, true, false);
2177  * endcode
2178  *
2179  * note If either type of access is blocked, the page program wait flag is asserted after
2180  * an AHB write sequence completes. The assertion of the flag locks the arbitration, and all
2181  * accesses to the external memory are blocked. The internal "page wait time" counter then
2182  * starts (invoke XSPI_UpdatePageWaitTimeCounter to update the counter value). After this counter
2183  * reaches that value, a read is triggered by the XSPI module to read the external device's
2184  * status register (the sequence id should be pre-defined by XSPI_SetAhbReadStatusRegSeqId),
2185  * and the value is stored in an XSPI internal register. There are two
2186  * options (invoke XSPI_SelectPPWFlagClearPolicy to select) to clear the asserted page program wait flag:
2187  *      1. Automatically cleared by XSPI hardware;
2188  *      2. Cleared by software.
2189  *
2190  * note As soon as the page program wait flag is asserted, another counter (the SFP arbitration
2191  * lock counter) also starts; if the flag is not cleared before that counter expires, an
2192  * arbitration lock timeout error is generated.
2193  *
2194  * param[in] base XSPI peripheral base address.
2195  * param[in] blockSequentWrite Block subsequent writes or not.
2196  * param[in] blockRead Block read or not.
2197  *
2198  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2199  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2200  * retval kStatus_Success Success to set related registers.
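 *
 * note A typical setup sketch; the sequence index and the counter value below are placeholders, and when the
 * hardware clear policy is used the device status register layout must also be described via
 * XSPI_SetSFMStatusRegInfo():
 * code
 *  (void)XSPI_BlockAccessAfterAhbWrite(XSPI0, true, true);
 *  (void)XSPI_SelectPPWFlagClearPolicy(XSPI0, kXSPI_HardwareClearPPWFlag);
 *  (void)XSPI_SetAhbReadStatusRegSeqId(XSPI0, 3U);
 *  (void)XSPI_UpdatePageWaitTimeCounter(XSPI0, 50000UL);
 * endcode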
2201  */
2202 status_t XSPI_BlockAccessAfterAhbWrite(XSPI_Type *base, bool blockSequentWrite, bool blockRead)
2203 {
2204     uint32_t tmp32 = 0UL;
2205 
2206     if (XSPI_CheckAhbReadAccessAsserted(base))
2207     {
2208         return kStatus_XSPI_AhbReadAccessAsserted;
2209     }
2210 
2211     if (XSPI_CheckAhbWriteAccessAsserted(base))
2212     {
2213         return kStatus_XSPI_AhbWriteAccessAsserted;
2214     }
2215 
2216     tmp32 = base->AWRCR;
2217     tmp32 &= ~(XSPI_AWRCR_PPW_RD_DIS_MASK | XSPI_AWRCR_PPW_WR_DIS_MASK);
2218     tmp32 |= XSPI_AWRCR_PPW_RD_DIS(blockRead) | XSPI_AWRCR_PPW_WR_DIS(blockSequentWrite);
2219 
2220     base->AWRCR = tmp32;
2221 
2222     return kStatus_Success;
2223 }
2224 
2225 /*!
2226  * brief Set Page program wait flag clear policy.
2227  *
2228  * note If the hardware clear policy is selected, the device's WIP information should be set by invoking XSPI_SetSFMStatusRegInfo().
2229  *
2230  * param[in] base XSPI peripheral base address.
2231  * param[in] policy Specify the policy to clear page program wait flag.
2232  *
2233  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2234  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2235  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
2236  * retval kStatus_Success Success to set PPW flag clear policy.
2237  */
2238 status_t XSPI_SelectPPWFlagClearPolicy(XSPI_Type *base, xspi_ppw_flag_clear_policy_t policy)
2239 {
2240     if (XSPI_CheckAhbReadAccessAsserted(base))
2241     {
2242         return kStatus_XSPI_AhbReadAccessAsserted;
2243     }
2244 
2245     if (XSPI_CheckAhbWriteAccessAsserted(base))
2246     {
2247         return kStatus_XSPI_AhbWriteAccessAsserted;
2248     }
2249 
2250     if (XSPI_CheckGlobalConfigLocked(base))
2251     {
2252         return kStatus_XSPI_RegWriteLocked;
2253     }
2254 
2255     if (policy == kXSPI_HardwareClearPPWFlag)
2256     {
2257         base->BFGENCR |= XSPI_BFGENCR_PPWF_CLR_MASK;
2258     }
2259     else
2260     {
2261         base->BFGENCR &= ~XSPI_BFGENCR_PPWF_CLR_MASK;
2262     }
2263 
2264     return kStatus_Success;
2265 }
2266 
2267 /*!
2268  * brief Update page wait timeout counter.
2269  *
2270  * param[in] base XSPI peripheral base address.
2271  * param[in] countValue The value of counter, in AHB clock cycles.
2272  *
2273  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2274  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2275  * retval kStatus_XSPI_PageProgramWaitFlagAsserted Page program wait flag already asserted.
2276  * retval kStatus_Success Successfully updated the page wait timeout counter.
2277  */
2278 status_t XSPI_UpdatePageWaitTimeCounter(XSPI_Type *base, uint32_t countValue)
2279 {
2280     if (XSPI_CheckAhbReadAccessAsserted(base))
2281     {
2282         return kStatus_XSPI_AhbReadAccessAsserted;
2283     }
2284 
2285     if (XSPI_CheckAhbWriteAccessAsserted(base))
2286     {
2287         return kStatus_XSPI_AhbWriteAccessAsserted;
2288     }
2289 
2290     if ((base->FR & XSPI_FR_PPWF_MASK) != 0UL)
2291     {
2292         return kStatus_XSPI_PageProgramWaitFlagAsserted;
2293     }
2294 
2295     base->PPWF_TCNT = countValue;
2296 
2297     return kStatus_Success;
2298 }
2299 
2300 /*!
2301  * brief Set AHB read status register sequence Id
2302  *
2303  * note The read status sequence is only triggered when the page wait time counter expires.
2304  *
2305  * param[in] base XSPI peripheral base address.
2306  * param[in] seqId Specify the sequence Id in LUT used for AHB read status register.
2307  *
2308  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2309  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2310  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
2311  * retval kStatus_Success Success to set AHB read status register sequence Id.
2312  */
2313 status_t XSPI_SetAhbReadStatusRegSeqId(XSPI_Type *base, uint8_t seqId)
2314 {
2315     if (XSPI_CheckAhbReadAccessAsserted(base))
2316     {
2317         return kStatus_XSPI_AhbReadAccessAsserted;
2318     }
2319 
2320     if (XSPI_CheckAhbWriteAccessAsserted(base))
2321     {
2322         return kStatus_XSPI_AhbWriteAccessAsserted;
2323     }
2324 
2325     if (XSPI_CheckGlobalConfigLocked(base))
2326     {
2327         return kStatus_XSPI_RegWriteLocked;
2328     }
2329 
2330     base->BFGENCR = ((base->BFGENCR & (~XSPI_BFGENCR_SEQID_RDSR_MASK)) | XSPI_BFGENCR_SEQID_RDSR(seqId));
2331 
2332     return kStatus_Success;
2333 }
2334 
2335 /*!
2336  * brief Set Serial flash memory status register information
2337  *
2338  * param[in] base XSPI peripheral base address.
2339  * param[in] ptrStatusRegInfo Pointer to the variable which contain status register information.
2340  *
2341  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2342  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2343  * retval kStatus_XSPI_PageProgramWaitFlagAsserted Page program wait flag already asserted.
2344  * retval kStatus_Success Successfully set status register information.
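 *
 * note A minimal sketch; the field values below assume a device whose WIP bit sits at bit 0 of the lower
 * status-register half word and reads 1 while a program/erase is in progress, so adjust them for the actual device:
 * code
 *  xspi_device_status_reg_info_t statusRegInfo;
 *
 *  statusRegInfo.value1Expired         = true;
 *  statusRegInfo.upperHalfWordSelected = false;
 *  statusRegInfo.wipLocation           = 0U;
 *  (void)XSPI_SetSFMStatusRegInfo(XSPI0, &statusRegInfo);
 * endcode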
2345  */
2346 status_t XSPI_SetSFMStatusRegInfo(XSPI_Type *base, xspi_device_status_reg_info_t *ptrStatusRegInfo)
2347 {
2348     assert(ptrStatusRegInfo != NULL);
2349 
2350     if (XSPI_CheckAhbReadAccessAsserted(base))
2351     {
2352         return kStatus_XSPI_AhbReadAccessAsserted;
2353     }
2354 
2355     if (XSPI_CheckAhbWriteAccessAsserted(base))
2356     {
2357         return kStatus_XSPI_AhbWriteAccessAsserted;
2358     }
2359 
2360     if ((base->FR & XSPI_FR_PPWF_MASK) != 0UL)
2361     {
2362         return kStatus_XSPI_PageProgramWaitFlagAsserted;
2363     }
2364 
2365     uint32_t tmp32 = 0UL;
2366 
2367     tmp32 = (base->PPW_RDSR) &
2368             ~(XSPI_PPW_RDSR_RDSR_VAL_CHK_MASK | XSPI_PPW_RDSR_RDSR_HWORD_SEL_MASK | XSPI_PPW_RDSR_LOC_MASK);
2369 
2370     tmp32 |= XSPI_PPW_RDSR_RDSR_VAL_CHK(ptrStatusRegInfo->value1Expired) |
2371              XSPI_PPW_RDSR_RDSR_HWORD_SEL(ptrStatusRegInfo->upperHalfWordSelected) |
2372              XSPI_PPW_RDSR_LOC(ptrStatusRegInfo->wipLocation);
2373 
2374     base->PPW_RDSR = tmp32;
2375 
2376     return kStatus_Success;
2377 }
2378 
2379 /*!
2380  * brief Set Buffer size for all 4 AHB buffers.
2381  *
2382  * param[in] base XSPI peripheral base address.
2383  * param[in] buf0Size Specify size of AHB buffer0, range of 512, 256, 128, 64, 32, 16, 8, 4, 2, 0.
2384  * param[in] buf1Size Specify size of AHB buffer1, range of 512, 256, 128, 64, 32, 16, 8, 4, 2, 0.
2385  * param[in] buf2Size Specify size of AHB buffer2, range of 512, 256, 128, 64, 32, 16, 8, 4, 2, 0.
2386  * param[in] buf3Size Specify size of AHB buffer3, range of 512, 256, 128, 64, 32, 16, 8, 4, 2, 0.
2387  *
2388  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted.
2389  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted.
2390  * retval kStatus_Success Success to set AHB buffer size.
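 *
 * note For example, to dedicate most of the space to AHB buffer 3 (the four sizes must total 512):
 * code
 *  (void)XSPI_UpdateAhbBufferSize(XSPI0, 32U, 32U, 64U, 384U);
 * endcode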
2391  */
2392 status_t XSPI_UpdateAhbBufferSize(
2393     XSPI_Type *base, uint16_t buf0Size, uint16_t buf1Size, uint16_t buf2Size, uint16_t buf3Size)
2394 {
2395     /* The total size of AHB buffer is 4KB. */
2396     assert((buf0Size + buf1Size + buf2Size + buf3Size) == 512U);
2397 
2398     if (XSPI_CheckAhbReadAccessAsserted(base))
2399     {
2400         return kStatus_XSPI_AhbReadAccessAsserted;
2401     }
2402 
2403     if (XSPI_CheckAhbWriteAccessAsserted(base))
2404     {
2405         return kStatus_XSPI_AhbWriteAccessAsserted;
2406     }
2407 
2408     base->BUFIND[0] = XSPI_BUFIND_TPINDX(buf0Size);
2409     base->BUFIND[1] = XSPI_BUFIND_TPINDX((uint32_t)buf0Size + (uint32_t)buf1Size);
2410     base->BUFIND[2] = XSPI_BUFIND_TPINDX((uint32_t)buf0Size + (uint32_t)buf1Size + (uint32_t)buf2Size);
2411 
2412     (void)buf3Size;
2413 
2414     return kStatus_Success;
2415 }
2416 
2417 /*!
2418  * brief Get status of AHB sub buffer.
2419  *
2420  * note This function is only useful when the sub-division feature of the selected AHB buffer is enabled.
2421  *
2422  * param[in] base XSPI peripheral base address.
2423  * param[in] ahbBufferId The Id of AHB buffer, range from 0 to 3.
2424  * param[in] subBufferId The Id of AHB buffer sub division, range from 0 to 3.
2425  *
2426  * return Current status of selected AHB sub buffer, in type of ref xspi_ahb_sub_buffer_status_t.
2427  */
2428 xspi_ahb_sub_buffer_status_t XSPI_GetAhbSubBufferStatus(XSPI_Type *base, uint8_t ahbBufferId, uint8_t subBufferId)
2429 {
2430     uint32_t tmp32 = 0UL;
2431     uint32_t shift = 8UL * (uint32_t)ahbBufferId + 2UL * (uint32_t)subBufferId;
2432     uint32_t mask  = 0x3UL << shift;
2433 
2434     tmp32 = base->AHB_BUF_STATUS;
2435     return (xspi_ahb_sub_buffer_status_t)(uint32_t)((tmp32 & mask) >> shift);
2436 }
2437 
2438 /*!
2439  * brief Enable AHB buffer performance monitor for selected AHB buffer's sub buffer.
2440  *
2441  * param[in] base XSPI peripheral base address.
2442  * param[in] ahbBufferId Specify the selected AHB buffer.
2443  * param[in] subBufferId Specify the selected sub-buffer.
2444  */
2445 void XSPI_EnableAhbBufferPerfMonitor(XSPI_Type *base, uint8_t ahbBufferId, uint8_t subBufferId)
2446 {
2447     uint32_t tmp32 = 0UL;
2448 
2449     tmp32 = base->AHB_PERF_CTRL;
2450 
2451     tmp32 &= ~(XSPI_AHB_PERF_CTRL_SUB_BUF_SEL0_MASK << (2UL * (uint32_t)ahbBufferId));
2452     tmp32 |= XSPI_AHB_PERF_CTRL_SUB_BUF_SEL0(subBufferId) << (2UL * (uint32_t)ahbBufferId);
2453 
2454     tmp32 |= (uint32_t)XSPI_AHB_PERF_CTRL_BUF0_EN_MASK << (uint32_t)(ahbBufferId);
2455 
2456     base->AHB_PERF_CTRL = tmp32;
2457 }
2458 /***************************** AHB Access Control Low-Level Interfaces End ********************************/
2459 
2460 /***************************** AHB Access Control Functional Interfaces Start ********************************/
2461 /*!
2462  * brief Set AHB access configuration.
2463  *
2464  * param[in] base XSPI peripheral base address.
2465  * param[in] ptrAhbAccessConfig Pointer to the variable which contains AHB access configurations.
2466  *
2467  * retval kStatus_XSPI_AhbReadAccessAsserted Fail due to an AHB read access already asserted
2468  * retval kStatus_XSPI_AhbWriteAccessAsserted Fail due to an AHB write access already asserted
2469  * retval kStatus_XSPI_RegWriteLocked Fail due to write operation to related registers is locked.
2470  * retval kStatus_Success Success to set AHB access configurations.
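 *
 * note A minimal read-only sketch; the buffer layout matches the XSPI_SetAhbBufferConfig example, AHB write
 * support is left disabled (ptrAhbWriteConfig is NULL), and fields not listed keep their zeroed defaults:
 * code
 *  xspi_ahb_access_config_t ahbAccessConfig;
 *
 *  (void)memset(&ahbAccessConfig, 0, sizeof(ahbAccessConfig));
 *  for (uint8_t i = 0U; i < 4U; i++)
 *  {
 *      ahbAccessConfig.buffer[i].masterId   = i;
 *      ahbAccessConfig.buffer[i].bufferSize = 128U;
 *  }
 *  ahbAccessConfig.ARDSeqIndex               = 0U;
 *  ahbAccessConfig.enableAHBPrefetch         = true;
 *  ahbAccessConfig.enableAHBBufferWriteFlush = true;
 *  ahbAccessConfig.ahbSplitSize              = kXSPI_AhbSplitSizeDisabled;
 *  ahbAccessConfig.ptrAhbWriteConfig         = NULL;
 *  (void)XSPI_SetAhbAccessConfig(XSPI0, &ahbAccessConfig);
 * endcode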
2471  */
2472 status_t XSPI_SetAhbAccessConfig(XSPI_Type *base, xspi_ahb_access_config_t *ptrAhbAccessConfig)
2473 {
2474     assert(ptrAhbAccessConfig != NULL);
2475 
2476     if (XSPI_CheckAhbReadAccessAsserted(base))
2477     {
2478         return kStatus_XSPI_AhbReadAccessAsserted;
2479     }
2480 
2481     if (XSPI_CheckAhbWriteAccessAsserted(base))
2482     {
2483         return kStatus_XSPI_AhbWriteAccessAsserted;
2484     }
2485 
2486     if (XSPI_CheckGlobalConfigLocked(base))
2487     {
2488         return kStatus_XSPI_RegWriteLocked;
2489     }
2490 
2491     /* Configure AHB buffers. */
2492     (void)XSPI_SetAhbBufferConfig(base, &(ptrAhbAccessConfig->buffer[0]), &(ptrAhbAccessConfig->buffer[1]),
2493                                   &(ptrAhbAccessConfig->buffer[2]), &(ptrAhbAccessConfig->buffer[3]));
2494     (void)XSPI_SetAhbReadDataSeqId(base, ptrAhbAccessConfig->ARDSeqIndex);
2495     (void)XSPI_EnableAhbBufferWriteFlush(base, ptrAhbAccessConfig->enableAHBBufferWriteFlush);
2496     XSPI_EnableAhbReadPrefetch(base, ptrAhbAccessConfig->enableAHBPrefetch);
2497     (void)XSPI_SetAhbAccessSplitSize(base, ptrAhbAccessConfig->ahbSplitSize);
2498     (void)XSPI_SetAhbAccessBoundary(base, ptrAhbAccessConfig->ahbAlignment);
2499 
2500     base->AWRCR &= ~(XSPI_AWRCR_PPW_RD_DIS_MASK | XSPI_AWRCR_PPW_WR_DIS_MASK);
2501 
2502     if (ptrAhbAccessConfig->ptrAhbWriteConfig != NULL)
2503     {
2504         (void)XSPI_BlockAccessAfterAhbWrite(base, ptrAhbAccessConfig->ptrAhbWriteConfig->blockSequenceWrite,
2505                                             ptrAhbAccessConfig->ptrAhbWriteConfig->blockRead);
2506         (void)XSPI_SelectPPWFlagClearPolicy(base, ptrAhbAccessConfig->ptrAhbWriteConfig->policy);
2507 
2508         (void)XSPI_UpdatePageWaitTimeCounter(base, ptrAhbAccessConfig->ptrAhbWriteConfig->pageWaitTimeoutValue);
2509 
2510         base->BFGENCR = (base->BFGENCR & ~(XSPI_BFGENCR_SEQID_WR_MASK | XSPI_BFGENCR_SEQID_RDSR_MASK)) |
2511                         XSPI_BFGENCR_SEQID_WR(ptrAhbAccessConfig->ptrAhbWriteConfig->AWRSeqIndex) |
2512                         XSPI_BFGENCR_SEQID_WR_EN_MASK |
2513                         XSPI_BFGENCR_SEQID_RDSR(ptrAhbAccessConfig->ptrAhbWriteConfig->ARDSRSeqIndex);
2514     }
2515 
2516     (void)XSPI_SetAhbErrorPayload(base, ptrAhbAccessConfig->ahbErrorPayload.highPayload,
2517                                   ptrAhbAccessConfig->ahbErrorPayload.lowPayload);
2518 
2519     return kStatus_Success;
2520 }
2521 
2522 /***************************** AHB Access Control Functional Interfaces End ********************************/
2523