1 /*
2 * Copyright 2021-2024 NXP
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6 /**
7 * @file Spi_Ip.c
8 * @implements Spi_Ip.c_Artifact
9 *
10 * @brief SPI low-level driver implementations.
11 * @details SPI low-level driver implementations.
12 *
13 * @addtogroup SPI_IP_DRIVER Spi Ip Driver
14 * @{
15 */
16
17 #ifdef __cplusplus
18 extern "C"
19 {
20 #endif
21
22
23 /*==================================================================================================
24 * INCLUDE FILES
25 * 1) system and project includes
26 * 2) needed interfaces from external units
27 * 3) internal and external interfaces from this unit
28 ==================================================================================================*/
29 #include "Mcal.h"
30 #include "Spi_Ip.h"
31 #include "Spi_Ip_Cfg.h"
32 #include "OsIf.h"
33 #if (SPI_IP_DMA_USED == STD_ON)
34 #include "Dma_Ip.h"
35 #include "Dma_Ip_Hw_Access.h"
36 #endif
37 #if (STD_ON == SPI_IP_ENABLE_USER_MODE_SUPPORT)
38 #define USER_MODE_REG_PROT_ENABLED (SPI_IP_ENABLE_USER_MODE_SUPPORT)
39 #include "RegLockMacros.h"
40 #endif
41 #include "SchM_Spi.h"
42 #if (STD_ON == SPI_IP_DEV_ERROR_DETECT)
43 #include "Devassert.h"
44 #endif
45
46 /*==================================================================================================
47 * SOURCE FILE VERSION INFORMATION
48 ==================================================================================================*/
49 #define SPI_IP_VENDOR_ID_C 43
50 #define SPI_IP_AR_RELEASE_MAJOR_VERSION_C 4
51 #define SPI_IP_AR_RELEASE_MINOR_VERSION_C 7
52 #define SPI_IP_AR_RELEASE_REVISION_VERSION_C 0
53 #define SPI_IP_SW_MAJOR_VERSION_C 2
54 #define SPI_IP_SW_MINOR_VERSION_C 0
55 #define SPI_IP_SW_PATCH_VERSION_C 0
56 /*==================================================================================================
57 * FILE VERSION CHECKS
58 ==================================================================================================*/
59 #ifndef DISABLE_MCAL_INTERMODULE_ASR_CHECK
60 /* Check if current file and Mcal header file are of the same Autosar version */
61 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION_C != MCAL_AR_RELEASE_MAJOR_VERSION) || \
62 (SPI_IP_AR_RELEASE_MINOR_VERSION_C != MCAL_AR_RELEASE_MINOR_VERSION))
63 #error "AutoSar Version Numbers of Spi_Ip.c and Mcal.h are different"
64 #endif
65
66 #if (SPI_IP_DMA_USED == STD_ON)
67 /* Check if current file and Dma_Ip header file are of the same Autosar version */
68 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION_C != DMA_IP_AR_RELEASE_MAJOR_VERSION) || \
69 (SPI_IP_AR_RELEASE_MINOR_VERSION_C != DMA_IP_AR_RELEASE_MINOR_VERSION))
70 #error "AutoSar Version Numbers of Spi_Ip.c and Dma_Ip.h are different"
71 #endif
72 #endif
73
74 #if (STD_ON == SPI_IP_ENABLE_USER_MODE_SUPPORT)
75 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION_C != REGLOCKMACROS_AR_RELEASE_MAJOR_VERSION) || \
76 (SPI_IP_AR_RELEASE_MINOR_VERSION_C != REGLOCKMACROS_AR_RELEASE_MINOR_VERSION))
77 #error "AutoSar Version Numbers of Spi_Ip.c and RegLockMacros.h are different"
78 #endif
79 #endif
80
81 /* Check if the current file and SchM_Spi.h header file are of the same version */
82 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION_C != SCHM_SPI_AR_RELEASE_MAJOR_VERSION) || \
83 (SPI_IP_AR_RELEASE_MINOR_VERSION_C != SCHM_SPI_AR_RELEASE_MINOR_VERSION) \
84 )
85 #error "AutoSar Version Numbers of Spi_Ip.c and SchM_Spi.h are different"
86 #endif
87
88 /* Check if the current file and OsIf.h header file are of the same version */
89 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION_C != OSIF_AR_RELEASE_MAJOR_VERSION) || \
90 (SPI_IP_AR_RELEASE_MINOR_VERSION_C != OSIF_AR_RELEASE_MINOR_VERSION) \
91 )
92 #error "AutoSar Version Numbers of Spi_Ip.c and OsIf.h are different"
93 #endif
94 #endif
95
96 #if (STD_ON == SPI_IP_DEV_ERROR_DETECT)
97 #ifndef DISABLE_MCAL_INTERMODULE_ASR_CHECK
98 /* Check if Spi_Ip source file and Devassert.h header file are of the same release version */
99 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION_C != DEVASSERT_AR_RELEASE_MAJOR_VERSION) || \
100 (SPI_IP_AR_RELEASE_MINOR_VERSION_C != DEVASSERT_AR_RELEASE_MINOR_VERSION) \
101 )
102 #error "AutoSar Version Numbers of Spi_Ip.c and Devassert.h are different"
103 #endif
104 #endif
105 #endif /* (STD_ON == SPI_IP_DEV_ERROR_DETECT) */
106
107 /* Check if Spi_Ip.h and Spi_Ip.c are of the same vendor */
108 #if (SPI_IP_VENDOR_ID != SPI_IP_VENDOR_ID_C)
109 #error "Spi_Ip.h and Spi_Ip.c have different vendor ids"
110 #endif
111 /* Check if Spi_Ip.h file and Spi_Ip.c file are of the same Autosar version */
112 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION != SPI_IP_AR_RELEASE_MAJOR_VERSION_C) || \
113 (SPI_IP_AR_RELEASE_MINOR_VERSION != SPI_IP_AR_RELEASE_MINOR_VERSION_C) || \
114 (SPI_IP_AR_RELEASE_REVISION_VERSION != SPI_IP_AR_RELEASE_REVISION_VERSION_C))
115 #error "AutoSar Version Numbers of Spi_Ip.h and Spi_Ip.c are different"
116 #endif
117 #if ((SPI_IP_SW_MAJOR_VERSION != SPI_IP_SW_MAJOR_VERSION_C) || \
118 (SPI_IP_SW_MINOR_VERSION != SPI_IP_SW_MINOR_VERSION_C) || \
119 (SPI_IP_SW_PATCH_VERSION != SPI_IP_SW_PATCH_VERSION_C))
120 #error "Software Version Numbers of Spi_Ip.h and Spi_Ip.c are different"
121 #endif
122
123 /* Check if Spi_Ip_Cfg.h and Spi_Ip.c are of the same vendor */
124 #if (SPI_IP_VENDOR_ID_CFG != SPI_IP_VENDOR_ID_C)
125 #error "Spi_Ip_Cfg.h and Spi_Ip.c have different vendor ids"
126 #endif
127 /* Check if Spi_Ip_Cfg.h file and Spi_Ip.c file are of the same Autosar version */
128 #if ((SPI_IP_AR_RELEASE_MAJOR_VERSION_CFG != SPI_IP_AR_RELEASE_MAJOR_VERSION_C) || \
129 (SPI_IP_AR_RELEASE_MINOR_VERSION_CFG != SPI_IP_AR_RELEASE_MINOR_VERSION_C) || \
130 (SPI_IP_AR_RELEASE_REVISION_VERSION_CFG != SPI_IP_AR_RELEASE_REVISION_VERSION_C))
131 #error "AutoSar Version Numbers of Spi_Ip_Cfg.h and Spi_Ip.c are different"
132 #endif
133 #if ((SPI_IP_SW_MAJOR_VERSION_CFG != SPI_IP_SW_MAJOR_VERSION_C) || \
134 (SPI_IP_SW_MINOR_VERSION_CFG != SPI_IP_SW_MINOR_VERSION_C) || \
135 (SPI_IP_SW_PATCH_VERSION_CFG != SPI_IP_SW_PATCH_VERSION_C))
136 #error "Software Version Numbers of Spi_Ip_Cfg.h and Spi_Ip.c are different"
137 #endif
138 /*==================================================================================================
139 * LOCAL TYPEDEFS (STRUCTURES, UNIONS, ENUMS)
140 ==================================================================================================*/
141 /*==================================================================================================
142 * LOCAL MACROS
143 ==================================================================================================*/
144 #if (SPI_IP_DMA_USED == STD_ON)
145 /* Maximum major loop count when minor loop channel linking is disabled */
146 #define SPI_IP_DMA_MAX_ITER_CNT_U16 ((uint16)0x7FFFu)
147 #endif
148
149 /* Maximum value of CTARE[DTCP] */
150 #define SPI_IP_CTARE_DTCP_MAX_U16 (SPI_CTARE_DTCP_MASK >> SPI_CTARE_DTCP_SHIFT)
151 /* Pushr CONT mask to be used for CMD field which is uint16 */
152 #define SPI_IP_PUSHR_CONT_MASK_U16 ((uint16)(SPI_PUSHR_CONT_MASK >> 16))
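/* Illustrative note (not part of the original source): PUSHR packs a 16-bit CMD half-word above the
 * 16-bit TXDATA field, so shifting the 32-bit CONT mask right by 16 yields the CONT bit position
 * inside the uint16 CMD value. E.g., assuming SPI_PUSHR_CONT_MASK is 0x80000000u, the macro below
 * evaluates to 0x8000u. */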
153 /* Maximum number of bytes that can be written in a FIFO entry */
154 #define SPI_TX_WORD_SIZE (SPI_TXFR_TXDATA_WIDTH / 8U)
155 /* Maximum number of bytes that can be read from a FIFO entry */
156 #define SPI_RX_WORD_SIZE (SPI_RXFR_RXDATA_WIDTH / 8U)
157 /*==================================================================================================
158 * LOCAL CONSTANTS
159 ==================================================================================================*/
160 #define SPI_START_SEC_CONST_UNSPECIFIED
161 #include "Spi_MemMap.h"
162
163 static SPI_Type* const Spi_Ip_apxBases[SPI_INSTANCE_COUNT] = IP_SPI_BASE_PTRS;
164
165 #define SPI_STOP_SEC_CONST_UNSPECIFIED
166 #include "Spi_MemMap.h"
167 /*==================================================================================================
168 * LOCAL VARIABLES
169 ==================================================================================================*/
170
171 #if (SPI_IP_DMA_USED == STD_ON)
172 #define SPI_START_SEC_VAR_CLEARED_32_NO_CACHEABLE
173 #include "Spi_MemMap.h"
174 VAR_SEC_NOCACHE(Spi_Ip_u32DiscardData) static uint32 Spi_Ip_u32DiscardData;
175 #define SPI_STOP_SEC_VAR_CLEARED_32_NO_CACHEABLE
176 #include "Spi_MemMap.h"
177 #endif
178
179 #define SPI_START_SEC_VAR_CLEARED_UNSPECIFIED_NO_CACHEABLE
180 #include "Spi_MemMap.h"
181
182 VAR_SEC_NOCACHE(Spi_Ip_axStateStructure) static Spi_Ip_StateStructureType Spi_Ip_axStateStructure[SPI_IP_NUMBER_OF_INSTANCES];
183 VAR_SEC_NOCACHE(Spi_Ip_apxStateStructureArray) static Spi_Ip_StateStructureType* Spi_Ip_apxStateStructureArray[SPI_INSTANCE_COUNT];
184
185 #define SPI_STOP_SEC_VAR_CLEARED_UNSPECIFIED_NO_CACHEABLE
186 #include "Spi_MemMap.h"
187
188 /*==================================================================================================
189 * GLOBAL CONSTANTS
190 ==================================================================================================*/
191 /*==================================================================================================
192 GLOBAL VARIABLES
193 ==================================================================================================*/
194 /*==================================================================================================
195 * LOCAL FUNCTION PROTOTYPES
196 ==================================================================================================*/
197 #define SPI_START_SEC_CODE
198 #include "Spi_MemMap.h"
199
200 static void Spi_Ip_TransferProcess(uint8 Instance);
201 #if (SPI_IP_DMA_USED == STD_ON)
202 static void Spi_Ip_CmdDmaTcdSGInit(uint8 Instance);
203 #if (SPI_IP_ENABLE_DMAFASTTRANSFER_SUPPORT == STD_ON)
204 static void Spi_Ip_CmdDmaTcdSGConfig( uint8 Instance,
205 uint8 TCDSGId,
206 uint32 CmdAdd,
207 uint16 Iter,
208 uint8 DisHwReq
209 );
210 static void Spi_Ip_DmaFastConfig(uint8 Instance, const Spi_Ip_FastTransferType *FastTransferCfg, uint8 NumberOfTransfer);
211 static void Spi_Ip_RxDmaTcdSGConfig(uint8 Instance, uint8 TCDSGIndex, uint8 DisHwReq);
212 static void Spi_Ip_RxDmaTcdSGInit(uint8 Instance);
213 static void Spi_Ip_TxDmaTcdSGConfig(uint8 Instance, uint8 TCDSGIndex, uint8 DisHwReq);
214 static void Spi_Ip_TxDmaTcdSGInit(uint8 Instance);
215 #endif
216 #endif
217 #if (STD_ON == SPI_IP_ENABLE_USER_MODE_SUPPORT)
218 void Spi_Ip_SetUserAccess(uint8 Instance);
219 static void Spi_Ip_SetUserAccessAllowed(uint8 Instance);
220 #endif /* SPI_IP_ENABLE_USER_MODE_SUPPORT */
221
222 static void Spi_Ip_WriteTxFifo
223 (
224 uint16 NumberOfWrites,
225 uint8 Instance
226 );
227 static void Spi_Ip_ReceiveData
228 (
229 uint16 NumberOfReads,
230 uint8 Instance
231 );
232 static void Spi_Ip_ChannelFinished
233 (
234 uint8 Instance, boolean ErrorFlag
235 );
236
237 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
238 static void Spi_Ip_CheckValidParameters
239 (
240 const Spi_Ip_ExternalDeviceType *ExternalDevice,
241 uint16 Length,
242 const uint8 *TxBuffer,
243 const uint8 *RxBuffer,
244 uint32 TimeOut
245 );
246 #endif
247 /*==================================================================================================
248 * LOCAL FUNCTIONS
249 ==================================================================================================*/
250
251 /**
252 * @brief Fill CMD FIFO
253 * @details Write PushrCmds to CMD FIFO
254 *
255 * @param[in] State State of the current transfer
256 * @param[in] Base Base address of the instance regs to be updated
257 * @return none
258 */
259 static uint16 Spi_Ip_WriteCmdFifo(Spi_Ip_StateStructureType* State, SPI_Type *Base)
260 {
261 uint16 NumberOfCmdWrites = 0u;
262 uint16 i;
263
264 NumberOfCmdWrites = (uint16)((Base->SREX) & SPI_SREX_CMDCTR_MASK) >> SPI_SREX_CMDCTR_SHIFT;
265 NumberOfCmdWrites = SPI_IP_FIFO_SIZE_U16 - NumberOfCmdWrites;
266 if (NumberOfCmdWrites > State->ExpectedCmdFifoWrites)
267 {
268 NumberOfCmdWrites = State->ExpectedCmdFifoWrites;
269 }
270 for (i = 0; i < NumberOfCmdWrites; i++)
271 {
272 if (State->Pushr0RepeatIndex < State->Pushr0Repeat)
273 {
274 State->Pushr0RepeatIndex++;
275 }
276 else
277 {
278 State->NbCmdsIndex++;
279 }
280 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
281 DevAssert(State->NbCmdsIndex < 3u);
282 #endif
283 Base->PUSHR.FIFO.CMD = State->PushrCmds[State->NbCmdsIndex];
284 State->ExpectedCmdFifoWrites--;
285 }
286
287 return NumberOfCmdWrites;
288 }
289
290 /**
291 * @brief This function is called by Spi_Ip_IrqHandler or Spi_Ip_ManageBuffers. It processes the transfer in interrupt mode and polling mode.
292 * @details This function fills data into the TX FIFO and reads data from the RX FIFO into the RX buffers.
293 *
294 * @param[in] Instance Index of the hardware instance.
295 * @return void
296 */
297 static void Spi_Ip_TransferProcess(uint8 Instance)
298 {
299 SPI_Type* Base = Spi_Ip_apxBases[Instance];
300 Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
301 uint32 SrStatusRegister;
302 uint16 NumberOfReads;
303 boolean ErrorFlag = (boolean)FALSE;
304
305 if (SPI_IP_BUSY == State->Status)
306 {
307 /* Read Status and clear all flags. */
308 SrStatusRegister = Base->SR;
309 Base->SR &= 0xFFFF0000u;
310
311 if ( (0u != (SrStatusRegister & (SPI_SR_RFOF_MASK | SPI_SR_TFUF_MASK))) )
312 {
313 /* mark error flag */
314 ErrorFlag = (boolean)TRUE;
315 }
316 else
317 {
318 /* Read all data available in receive HW fifo. */
319 NumberOfReads = (uint16)(((Base->SR) & SPI_SR_RXCTR_MASK) >> SPI_SR_RXCTR_SHIFT);
320 if(NumberOfReads != 0u)
321 {
322 /* Read data from RX FIFO */
323 Spi_Ip_ReceiveData(NumberOfReads, Instance);
324 }
325
326 #if (SPI_IP_SLAVE_SUPPORT == STD_ON)
327 if((boolean)FALSE == State->PhyUnitConfig->SlaveMode)
328 #endif
329 {
330 if (State->ExpectedCmdFifoWrites != 0u)
331 {
332 (void)Spi_Ip_WriteCmdFifo(State, Base);
333 }
334 }
335 /* Push data until HW fifo is full or transfer is done. */
336 /* After all frames currently in the RX FIFO have been read, the TX FIFO may still hold frames. If another interrupt
337 preempts this handler before it checks how many TX FIFO slots are free, and servicing that interrupt takes longer
338 than transmitting the frames left in the TX FIFO, the TX FIFO drains and the corresponding frames land in the RX FIFO.
339 When this handler resumes, it refills the TX FIFO completely and exits. If a higher-priority interrupt then delays the
340 next SPI interrupt for longer than it takes to transmit all frames in the TX FIFO, the RX FIFO can overflow because
341 the SPI interrupt is not serviced to read it. To avoid this, keep the total number of frames in the TX and RX FIFOs below the FIFO size (allowing for one frame possibly in flight). */
342 if (State->CurrentTxFifoSlot > (State->ExpectedFifoWrites - State->TxIndex))
343 {
344 State->CurrentTxFifoSlot = State->ExpectedFifoWrites - State->TxIndex;
345 }
346 if(State->CurrentTxFifoSlot != 0u)
347 {
348 Spi_Ip_WriteTxFifo(State->CurrentTxFifoSlot, Instance);
349 /* Reset the number of TX FIFO slots that can be written */
350 State->CurrentTxFifoSlot = 0u;
351 }
352 }
353 /* End of transfer when all frames have been received or an error occurred */
354 if ((State->RxIndex == State->ExpectedFifoReads) || ((boolean)TRUE == ErrorFlag))
355 {
356 /* Only set the HALT bit to stop the transfer when there is an error or CS must be de-asserted at the end of the transfer sequence */
357 if(((boolean)TRUE == ErrorFlag) || ((boolean)FALSE == State->KeepCs))
358 {
359 SchM_Enter_Spi_SPI_EXCLUSIVE_AREA_12();
360 /* Stop Transfer */
361 Base->MCR |= SPI_MCR_HALT_MASK;
362 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_12();
363 }
364 /* Disable interrupts and DMA requests. */
365 Base->RSER = 0U;
366 /* Channel finished */
367 Spi_Ip_ChannelFinished(Instance, ErrorFlag);
368 }
369 }
370 }
371
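/**
 * @brief Calculate the number of frames and the FIFO word sizes for a transfer.
 * @details Derives the bytes per FIFO word and bytes per frame from FrameSize, then returns the frame
 *          count together with the TX/RX word sizes clipped to the hardware FIFO entry widths.
 *          Worked example (illustrative, derived from the code below): FrameSize = 72 and NbBytes = 24
 *          give BytesPerWord = 4, BytesPerFrame = (72 / 32) * 4 + 4 = 12, so *Frames = 24 / 12 = 2.
 *
 * @param[in]  FrameSize      Frame size in bits.
 * @param[in]  NbBytes        Number of bytes of the transfer.
 * @param[out] Frames         Number of frames of the transfer.
 * @param[out] TxBytesPerWord Number of bytes written per TX FIFO entry.
 * @param[out] RxBytesPerWord Number of bytes read per RX FIFO entry.
 * @return void
 */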
372 static void Spi_Ip_CalculateFifoWords(uint16 FrameSize, uint16 NbBytes,
373 uint16 *Frames, uint16 *TxBytesPerWord,
374 uint16 *RxBytesPerWord)
375 {
376 uint16 BytesPerWord;
377 uint16 BytesPerFrame;
378
379 /* Calculate value of BytesPerWord and BytesPerFrame based on FrameSize */
380 if (FrameSize > 16u)
381 {
382 BytesPerWord = 4u;
383 BytesPerFrame = FrameSize / 32u * BytesPerWord;
384 /* E.g. 72-bit frame -> 3 words (32,32,8) bits */
385 if (0u != (FrameSize % 32u))
386 {
387 BytesPerFrame += BytesPerWord;
388 }
389 }
390 else if (FrameSize > 8u)
391 {
392 BytesPerWord = 2u;
393 BytesPerFrame = BytesPerWord;
394 }
395 else
396 {
397 BytesPerWord = 1u;
398 BytesPerFrame = BytesPerWord;
399 }
400
401 *Frames = NbBytes / BytesPerFrame;
402
403 /* Return the maximum number of bytes that fit in a TX FIFO entry */
404 if (BytesPerWord < SPI_TX_WORD_SIZE)
405 {
406 *TxBytesPerWord = BytesPerWord;
407 }
408 else
409 {
410 *TxBytesPerWord = SPI_TX_WORD_SIZE;
411 }
412
413 /* Return the maximum number of bytes that fit in an RX FIFO entry */
414 if (BytesPerWord < SPI_RX_WORD_SIZE)
415 {
416 *RxBytesPerWord = BytesPerWord;
417 }
418 else
419 {
420 *RxBytesPerWord = SPI_RX_WORD_SIZE;
421 }
422 }
423
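/**
 * @brief Write the CTAR/CTARE registers and prepare the PUSHR command half-words for the current transfer.
 * @details One CTAR/CTARE pair is programmed per prepared command (NbCmds); each PushrCmd selects its CTAR
 *          via CTAS. If the last command still has CONT set, CONT is cleared so CS is de-asserted at the
 *          end of the transfer. In slave mode only CTAR_SLAVE[0] is written.
 *
 * @param[in] State State of the current transfer
 * @param[in] Base  Base address of the instance regs to be updated
 * @return void
 */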
424 static void Spi_Ip_UpdateCtarAndPushr(Spi_Ip_StateStructureType* State, SPI_Type *Base)
425 {
426 const Spi_Ip_ExternalDeviceType *Dev = State->ExternalDevice;
427 uint32 PushrCmd32 = ((uint32)Dev->PushrCmd) << 16u;
428 uint16 CtarFrameSize;
429 uint16 CtareFrameSize;
430 uint32 CtarSrc;
431 uint16 i;
432
433 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
434 DevAssert(State->NbCmds > 0u);
435 #endif
436
437 #if (SPI_IP_DUAL_CLOCK_MODE == STD_ON)
438 CtarSrc = Dev->Ctar[State->ClockMode];
439 #else
440 CtarSrc = Dev->Ctar;
441 #endif
442
443 CtarFrameSize = (uint16)Dev->DeviceParams->FrameSize - 1u;
444 CtareFrameSize = (CtarFrameSize >> 4u) & 0x1u;
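    /* Illustrative example (assumption: FMSZ is a 4-bit CTAR field extended by the FMSZE bit in CTARE):
     * FrameSize = 32 gives CtarFrameSize = 31 (0b11111), so FMSZ holds the low four bits (0xF) and
     * FMSZE holds bit 4 (1). */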
445 #if (SPI_IP_SLAVE_SUPPORT == STD_ON)
446 if (State->PhyUnitConfig->SlaveMode)
447 {
448 Base->MODE.CTAR_SLAVE[0] = CtarSrc | SPI_CTAR_SLAVE_FMSZ(CtarFrameSize);
449 }
450 else
451 #endif
452 {
453 for (i = 0u; i < State->NbCmds; i++) {
454 Base->MODE.CTAR[i] = CtarSrc |
455 SPI_CTAR_FMSZ(CtarFrameSize) |
456 SPI_CTAR_LSBFE(Dev->DeviceParams->Lsb ? 1u : 0u);
457 Base->CTARE[i] = Dev->Ctare |
458 SPI_CTARE_FMSZE(CtareFrameSize) |
459 SPI_CTARE_DTCP(State->DTCPValue[i]);
460 State->PushrCmds[i] = (uint16)(((PushrCmd32 & (~SPI_PUSHR_CTAS_MASK)) |
461 SPI_PUSHR_CTAS(i)) >> 16u);
462 }
463 /* Update last pushr to clear CS if needed */
464 if ((State->PushrCmds[i - 1u] & SPI_IP_PUSHR_CONT_MASK_U16) != 0u)
465 {
466 State->PushrCmds[i - 1u] &= ~SPI_IP_PUSHR_CONT_MASK_U16;
467 }
468 }
469 }
470
471 /**
472 * @brief Init State and update CTAR, CTARE and PUSHR
473 * @details A command with DTCP = SPI_IP_CTARE_DTCP_MAX_U16 is repeated as many times as needed.
474 * The remaining frames are sent with an additional command using a smaller DTCP.
475 * If CONT is used, one more command is configured and the last one clears CONT.
476 * Writes CTAR and CTARE and prepares the PUSHR values.
477 * PushrCmd values are prepared based on the DTCPs and CONT.
478 *
479 * @param[in] State State of the current transfer
480 * @param[in] Base Base address of the instance regs to be updated
481 * @param[in] NbBytes Number of bytes of the transfer
482 * @return void
483 */
484 static void Spi_Ip_PrepareTransfer(Spi_Ip_StateStructureType* State, SPI_Type *Base, uint16 NbBytes)
485 {
486 uint16 Frames;
487 uint16 TxBytesPerWord;
488 uint16 RxBytesPerWord;
489
490 Spi_Ip_CalculateFifoWords(State->ExternalDevice->DeviceParams->FrameSize, NbBytes,
491 &Frames, &TxBytesPerWord, &RxBytesPerWord);
492
493 /*
494 * How a transfer works using DTCP feature:
495 * Transfer is split in up to 3 commands, each one using a different CTAR with its own DTCP value.
496 * Case 1: CS_TOGGLE and Frames < SPI_IP_CTARE_DTCP_MAX_U16
497 * - CMD0 uses CTAR0 with DTCP = Frames -> One command sends all frames.
498 *
499 * Case 2: CS_TOGGLE and Frames > DTCP_MAX
500 * - CMD0 uses CTAR0 with DTCP = DTCP_MAX -> CMD0 is sent until (repeated) remaining frames < DTCP_MAX
501 * - CMD1 uses CTAR1 with DTCP = remaining frames -> CMD1 is sent after all CMD0 are sent to send remaining frames
502 *
503 * Case 3: CS_KEEP_ASSERTED and Keep_CS = True and Frames < SPI_IP_CTARE_DTCP_MAX_U16
504 * - CMD0 uses CTAR0 with DTCP = Frames - 1
505 * - CMD1 uses CTAR1 with DTCP = 1 (Unset CONT bit)
506 *
507 * Case 4: CS_KEEP_ASSERTED and Keep_CS = True and Frames > SPI_IP_CTARE_DTCP_MAX_U16
508 * - CMD0 uses CTAR0 as in case 2
509 * - CMD1 uses CTAR1 with DTCP = remaining frames - 1
510 * - CMD2 uses CTAR2 with DTCP = 1 (Unset CONT bit)
511 */
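    /*
     * Illustrative numbers (assuming SPI_IP_CTARE_DTCP_MAX_U16 = 0x7FF = 2047): for Frames = 5000,
     * CMD0 uses DTCP = 2047 and is pushed 5000 / 2047 = 2 times, leaving 5000 % 2047 = 906 frames
     * for the following command(s).
     */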
512 State->RxIndex = 0u;
513 State->TxIndex = 0u;
514 State->NbCmdsIndex = 0;
515 State->Pushr0RepeatIndex = 0;
516 State->NbCmds = 1u;
517 #if SPI_IP_SLAVE_SUPPORT == STD_ON
518 if ((boolean)FALSE == State->PhyUnitConfig->SlaveMode)
519 #endif
520 {
521 if (TRUE == State->KeepCs)
522 {
523 /* Can't use CTARE[DTCP] if CS is active between transfers.
524 * DTCP might be different for the next transfer and CTARE can't be updated
525 * in the running state
526 */
527 State->DTCPValue[0] = 1;
528 State->Pushr0Repeat = Frames;
529 State->ExpectedCmdFifoWrites = State->Pushr0Repeat;
530
531 if (((State->ExternalDevice->PushrCmd & SPI_IP_PUSHR_CONT_MASK_U16) != 0u) && (Frames > 1u))
532 {
533 State->Pushr0Repeat--;
534 State->DTCPValue[1] = 1;
535 State->NbCmds++;
536 }
537 }
538 else
539 {
540 /* Prepare state to use CTARE[DTCP] */
541 if (Frames < SPI_IP_CTARE_DTCP_MAX_U16) {
542 /* Max DTCP value is enough to send Frames. Set 1 CMD to be pushed 1 time */
543 State->DTCPValue[0] = Frames % SPI_IP_CTARE_DTCP_MAX_U16;
544 State->Pushr0Repeat = 1u;
545 State->DTCPValue[1] = 0u;
546 }
547 else
548 {
549 /*
550 * Max DTCP value is not enough to send Frames
551 * Use 2 CMDs:
552 * 1: Set DTCP to Max DTCP value. To be pushed Frames / DTCP_MAX times
553 * 2: Set DTCP to remaining frames. To be pushed 1 time.
554 */
555 State->DTCPValue[0] = SPI_IP_CTARE_DTCP_MAX_U16;
556 State->Pushr0Repeat = Frames / SPI_IP_CTARE_DTCP_MAX_U16;
557 State->DTCPValue[1] = Frames % SPI_IP_CTARE_DTCP_MAX_U16;
558 State->NbCmds++;
559 }
560 /*
561 * Split last CMD in 2 if:
562 * - KeepCs and CS Keep asserted are set and
563 * - Last calculated command DTCP is greater than 1.
564 * Last CMD DTCP will become DTCP - 1.
565 * New CMD is created with DTCP = 1 disabling CONT
566 */
567 if (State->DTCPValue[State->NbCmds - 1u] > 1u)
568 {
569 State->DTCPValue[State->NbCmds - 1u]--;
570 State->DTCPValue[State->NbCmds] = 1u;
571 State->NbCmds++;
572 }
573 /* Cmd 0 is repeated Pushr0Repeat times and then each cmd 1 time */
574 State->ExpectedCmdFifoWrites = State->Pushr0Repeat + State->NbCmds - 1u;
575 }
576 }
577 State->ExpectedFifoWrites = NbBytes / TxBytesPerWord;
578 State->ExpectedFifoReads = NbBytes / RxBytesPerWord;
579
580 Spi_Ip_UpdateCtarAndPushr(State, Base);
581 }
582
583 #if (SPI_IP_DMA_USED == STD_ON)
584 /**
585 * @brief This function will initialize all software TCD Scatter Gathers for the CMD DMA channel
586 * and load software TCD Scatter Gather 0 into the hardware TCD.
587 *
588 * @param[in] Instance Index of the hardware instance.
589 * @return void
590 */
591 static void Spi_Ip_CmdDmaTcdSGInit(uint8 Instance)
592 {
593 const SPI_Type* Base = Spi_Ip_apxBases[Instance];
594 const Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
595 Dma_Ip_LogicChannelScatterGatherListType DmaTcdList[9u];
596 uint8 TCDSGIndex = 0u;
597
598 /* Initialize the software TCD Scatter Gather configuration for the CMD DMA channel */
599 DmaTcdList[0u].Param = DMA_IP_CH_SET_SOURCE_ADDRESS;
600 DmaTcdList[1u].Param = DMA_IP_CH_SET_DESTINATION_ADDRESS;
601 DmaTcdList[2u].Param = DMA_IP_CH_SET_SOURCE_SIGNED_OFFSET;
602 DmaTcdList[3u].Param = DMA_IP_CH_SET_SOURCE_TRANSFER_SIZE;
603 DmaTcdList[4u].Param = DMA_IP_CH_SET_DESTINATION_TRANSFER_SIZE;
604 DmaTcdList[5u].Param = DMA_IP_CH_SET_MINORLOOP_SIZE;
605 DmaTcdList[6u].Param = DMA_IP_CH_SET_DESTINATION_SIGNED_OFFSET;
606 DmaTcdList[7u].Param = DMA_IP_CH_SET_MAJORLOOP_COUNT;
607 DmaTcdList[8u].Param = DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST;
608
609 DmaTcdList[0u].Value = 0u; /* dummy src address read, it will be updated later */
610 DmaTcdList[1u].Value = (Dma_Ip_uintPtrType)&Base->PUSHR.FIFO.CMD; /* dest address write*/
611 DmaTcdList[2u].Value = 0u; /* no src offset */
612 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes src transfer size */
613 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes dest transfer size */
614 DmaTcdList[5u].Value = 2u; /* bytes to transfer for each request */
615 DmaTcdList[6u].Value = 0u; /* no dest offset */
616 DmaTcdList[7u].Value = 0u; /* dummy iteration count will be updated later */
617 DmaTcdList[8u].Value = 1u; /* dummy disable hardware request when major loop completes, will be updated later when Scatter Gather is enabled */
618
619 for(TCDSGIndex = 0u; TCDSGIndex < State->PhyUnitConfig->NumTxCmdDmaSGId; TCDSGIndex++)
620 {
621 /* Update software TCD Scatter Gather */
622 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->TxCmdDmaChannel, State->PhyUnitConfig->TxCmdDmaSGId[TCDSGIndex], DmaTcdList, 9u);
623 }
624
625 /* Load software TCD Scatter Gather 0 into hardware TCD */
626 (void)Dma_Ip_SetLogicChannelScatterGatherConfig(State->PhyUnitConfig->TxCmdDmaChannel, State->PhyUnitConfig->TxCmdDmaSGId[0u]);
627 }
628
629 #if (SPI_IP_ENABLE_DMAFASTTRANSFER_SUPPORT == STD_ON)
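/**
 * @brief This function will initialize all software TCD Scatter Gathers for the TX DMA channel
 *        used by DMA fast transfers. Fields marked as dummy are updated later by Spi_Ip_TxDmaTcdSGConfig.
 *
 * @param[in] Instance Index of the hardware instance.
 * @return void
 */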
630 static void Spi_Ip_TxDmaTcdSGInit(uint8 Instance)
631 {
632 const SPI_Type* Base = Spi_Ip_apxBases[Instance];
633 const Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
634 Dma_Ip_LogicChannelTransferListType DmaTcdList[10u];
635 uint8 TCDSGIndex = 0u;
636
637 /* Initialize the software TCD Scatter Gather configuration for the TX DMA channel */
638 DmaTcdList[0u].Param = DMA_IP_CH_SET_SOURCE_ADDRESS;
639 DmaTcdList[1u].Param = DMA_IP_CH_SET_DESTINATION_ADDRESS;
640 DmaTcdList[2u].Param = DMA_IP_CH_SET_SOURCE_SIGNED_OFFSET;
641 DmaTcdList[3u].Param = DMA_IP_CH_SET_SOURCE_TRANSFER_SIZE;
642 DmaTcdList[4u].Param = DMA_IP_CH_SET_DESTINATION_TRANSFER_SIZE;
643 DmaTcdList[5u].Param = DMA_IP_CH_SET_MINORLOOP_SIZE;
644 DmaTcdList[6u].Param = DMA_IP_CH_SET_DESTINATION_SIGNED_OFFSET;
645 DmaTcdList[7u].Param = DMA_IP_CH_SET_MAJORLOOP_COUNT;
646 DmaTcdList[8u].Param = DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST;
647 DmaTcdList[9u].Param = DMA_IP_CH_SET_SOURCE_MODULO;
648
649 DmaTcdList[1u].Value = (Dma_Ip_uintPtrType)&Base->PUSHR.FIFO.TX; /* dest address write*/
650 DmaTcdList[2u].Value = 1u; /* dummy src offset of 1 byte, will be updated later according to the frame size and whether default data is transferred */
651 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* dummy 1 byte src transfer size, will be updated later according to the frame size */
652 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* dummy 1 byte dest transfer size, will be updated later according to the frame size */
653 DmaTcdList[5u].Value = 1u; /* dummy bytes to transfer per request, will be updated later according to the frame size */
654 DmaTcdList[6u].Value = 0u; /* no dest offset */
655 DmaTcdList[7u].Value = 0u; /* dummy iteration count, will be updated later according to the number of frames */
656 DmaTcdList[8u].Value = 1u; /* dummy disable hardware request when major loop completes, will be updated later depending on whether this is the last transfer */
657 DmaTcdList[9u].Value = 0u; /* dummy no src address modulo, will be updated later based on whether default data is transferred */
658 DmaTcdList[0u].Value = 0u; /* dummy src address read, will be updated later based on TxBuffer */
659
660 for(TCDSGIndex = 0u; TCDSGIndex < State->PhyUnitConfig->MaxNumOfFastTransfer; TCDSGIndex++)
661 {
662 /* Update software TX DMA TCD Scatter Gather */
663 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->TxDmaChannel, State->PhyUnitConfig->TxDmaFastSGId[TCDSGIndex], DmaTcdList, 10u);
664 }
665 }
666
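/**
 * @brief This function will configure the software TCD Scatter Gather for the TX DMA channel
 *        according to the current transfer (frame size, TX buffer or default data).
 *
 * @param[in] Instance   Index of the hardware instance.
 * @param[in] TCDSGIndex Index of the software TCD Scatter Gather to configure.
 * @param[in] DisHwReq   Disable the hardware request when the major loop completes.
 * @return void
 */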
667 static void Spi_Ip_TxDmaTcdSGConfig(uint8 Instance, uint8 TCDSGIndex, uint8 DisHwReq)
668 {
669 Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
670 Dma_Ip_LogicChannelTransferListType DmaTcdList[8u];
671
672 /* Update buffers index */
673 State->TxIndex = State->ExpectedFifoWrites;
674
675 /* configure TX DMA TCD Scatter Gather */
676 /* No need to configure the dest address and dest offset because they are already set by Spi_Ip_TxDmaTcdSGInit */
677 DmaTcdList[0u].Param = DMA_IP_CH_SET_SOURCE_ADDRESS;
678 DmaTcdList[1u].Param = DMA_IP_CH_SET_SOURCE_SIGNED_OFFSET;
679 DmaTcdList[2u].Param = DMA_IP_CH_SET_SOURCE_TRANSFER_SIZE;
680 DmaTcdList[3u].Param = DMA_IP_CH_SET_DESTINATION_TRANSFER_SIZE;
681 DmaTcdList[4u].Param = DMA_IP_CH_SET_MINORLOOP_SIZE;
682 DmaTcdList[5u].Param = DMA_IP_CH_SET_MAJORLOOP_COUNT;
683 DmaTcdList[6u].Param = DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST;
684 DmaTcdList[7u].Param = DMA_IP_CH_SET_SOURCE_MODULO;
685
686 if(State->ExternalDevice->DeviceParams->FrameSize < 9u)
687 {
688 DmaTcdList[1u].Value = 1u; /* src offset is 1 byte */
689 DmaTcdList[2u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte src transfer size */
690 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte dest transfer size */
691 DmaTcdList[4u].Value = 1u; /* bytes to transfer for each request */
692 }
693 else
694 {
695 DmaTcdList[1u].Value = 2u; /* src offset is 2 bytes */
696 DmaTcdList[2u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes src transfer size */
697 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes dest transfer size */
698 DmaTcdList[4u].Value = 2u; /* bytes to transfer for each request */
699 }
700 DmaTcdList[5u].Value = State->ExpectedFifoWrites; /* iteration count */
701 DmaTcdList[6u].Value = DisHwReq; /* disable hardware request when major loop complete */
702 DmaTcdList[7u].Value = 0u; /* no src address modulo */
703 if(NULL_PTR == State->TxBuffer)
704 {
705 /* send default data */
706 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)&State->PhyUnitConfig->CmdDmaFast[TCDSGIndex].DefaultData; /* src address read */
707 if(State->ExternalDevice->DeviceParams->FrameSize < 17u)
708 {
709 DmaTcdList[1u].Value = 0u; /* src offset is 0 byte */
710 }
711 else
712 {
713 DmaTcdList[7u].Value = 2u; /* data is 4 bytes, src address modulo is 2 bits */
714 }
715 }
716 else
717 {
718 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)State->TxBuffer; /* src address read */
719 }
720 /* Update software TX DMA TCD Scatter Gather */
721 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->TxDmaChannel, State->PhyUnitConfig->TxDmaFastSGId[TCDSGIndex], DmaTcdList, 8u);
722 }
723
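/**
 * @brief This function will initialize all software TCD Scatter Gathers for the RX DMA channel
 *        used by DMA fast transfers. Fields marked as dummy are updated later by Spi_Ip_RxDmaTcdSGConfig.
 *
 * @param[in] Instance Index of the hardware instance.
 * @return void
 */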
724 static void Spi_Ip_RxDmaTcdSGInit(uint8 Instance)
725 {
726 const SPI_Type* Base = Spi_Ip_apxBases[Instance];
727 const Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
728 Dma_Ip_LogicChannelTransferListType DmaTcdList[9u];
729 uint8 TCDSGIndex = 0u;
730
731 /* Initialize the software TCD Scatter Gather configuration for the RX DMA channel */
732 DmaTcdList[0u].Param = DMA_IP_CH_SET_SOURCE_ADDRESS;
733 DmaTcdList[1u].Param = DMA_IP_CH_SET_DESTINATION_ADDRESS;
734 DmaTcdList[2u].Param = DMA_IP_CH_SET_SOURCE_SIGNED_OFFSET;
735 DmaTcdList[3u].Param = DMA_IP_CH_SET_SOURCE_TRANSFER_SIZE;
736 DmaTcdList[4u].Param = DMA_IP_CH_SET_DESTINATION_TRANSFER_SIZE;
737 DmaTcdList[5u].Param = DMA_IP_CH_SET_MINORLOOP_SIZE;
738 DmaTcdList[6u].Param = DMA_IP_CH_SET_DESTINATION_SIGNED_OFFSET;
739 DmaTcdList[7u].Param = DMA_IP_CH_SET_MAJORLOOP_COUNT;
740 DmaTcdList[8u].Param = DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST;
741
742 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)&Base->POPR; /* src address read */
743 DmaTcdList[2u].Value = 0u; /* no src offset */
744 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* dummy 1 byte src transfer size, will be updated later based on the frame size */
745 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* dummy 1 byte dest transfer size, will be updated later based on the frame size */
746 DmaTcdList[5u].Value = 1u; /* dummy 1 byte to transfer per request, will be updated later based on the frame size */
747 DmaTcdList[6u].Value = 1u; /* dummy dest offset of 1 byte, will be updated later based on the frame size and whether data is discarded */
748 DmaTcdList[1u].Value = 0u; /* dummy dest address write, will be updated later based on RxBuffer */
749 DmaTcdList[7u].Value = 0u; /* dummy iteration count, will be updated later based on the number of frames */
750 DmaTcdList[8u].Value = 1u; /* dummy disable hardware request when major loop completes, will be updated later depending on whether this is the last transfer */
751
752 for(TCDSGIndex = 0u; TCDSGIndex < State->PhyUnitConfig->MaxNumOfFastTransfer; TCDSGIndex++)
753 {
754 /* Update software RX DMA TCD Scatter Gather */
755 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->RxDmaChannel, State->PhyUnitConfig->RxDmaFastSGId[TCDSGIndex], DmaTcdList, 9u);
756 }
757 }
758
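/**
 * @brief This function will configure the software TCD Scatter Gather for the RX DMA channel
 *        according to the current transfer (frame size, RX buffer or discarded data).
 *
 * @param[in] Instance   Index of the hardware instance.
 * @param[in] TCDSGIndex Index of the software TCD Scatter Gather to configure.
 * @param[in] DisHwReq   Disable the hardware request when the major loop completes.
 * @return void
 */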
759 static void Spi_Ip_RxDmaTcdSGConfig(uint8 Instance, uint8 TCDSGIndex, uint8 DisHwReq)
760 {
761 Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
762 Dma_Ip_LogicChannelTransferListType DmaTcdList[8u];
763
764 /* Update buffers index */
765 State->RxIndex = State->ExpectedFifoReads;
766
767 /* configure RX DMA TCD Scatter Gather */
768 /* No need to configure the src address and src offset because they are already set by Spi_Ip_RxDmaTcdSGInit */
769 DmaTcdList[0u].Param = DMA_IP_CH_SET_DESTINATION_ADDRESS;
770 DmaTcdList[1u].Param = DMA_IP_CH_SET_SOURCE_TRANSFER_SIZE;
771 DmaTcdList[2u].Param = DMA_IP_CH_SET_DESTINATION_TRANSFER_SIZE;
772 DmaTcdList[3u].Param = DMA_IP_CH_SET_MINORLOOP_SIZE;
773 DmaTcdList[4u].Param = DMA_IP_CH_SET_DESTINATION_SIGNED_OFFSET;
774 DmaTcdList[5u].Param = DMA_IP_CH_SET_MAJORLOOP_COUNT;
775 DmaTcdList[6u].Param = DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST;
776 DmaTcdList[7u].Param = DMA_IP_CH_SET_CONTROL_EN_MAJOR_INTERRUPT;
777
778 if(State->ExternalDevice->DeviceParams->FrameSize < 9u)
779 {
780 DmaTcdList[1u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte src transfer size */
781 DmaTcdList[2u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte dest transfer size */
782 DmaTcdList[3u].Value = 1u; /* 1 byte to transfer for each request */
783 DmaTcdList[4u].Value = 1u; /* dest offset is 1 bytes */
784 }
785 else if (State->ExternalDevice->DeviceParams->FrameSize < 17u)
786 {
787 DmaTcdList[1u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes src transfer size */
788 DmaTcdList[2u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes dest transfer size */
789 DmaTcdList[3u].Value = 2u; /* 2 bytes to transfer for each request */
790 DmaTcdList[4u].Value = 2u; /* dest offset is 2 bytes */
791 }
792 else
793 {
794 DmaTcdList[1u].Value = DMA_IP_TRANSFER_SIZE_4_BYTE; /* 4 bytes src transfer size */
795 DmaTcdList[2u].Value = DMA_IP_TRANSFER_SIZE_4_BYTE; /* 4 bytes dest transfer size */
796 DmaTcdList[3u].Value = 4u; /* 4 bytes to transfer for each request */
797 DmaTcdList[4u].Value = 4u; /* dest offset is 4 bytes */
798 }
799 if(NULL_PTR == State->RxBuffer)
800 {
801 /* Discard data */
802 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)&Spi_Ip_u32DiscardData; /* dest address write*/
803 DmaTcdList[4u].Value = 0u; /* dest offset is 0 bytes */
804 }
805 else
806 {
807 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)State->RxBuffer; /* dest address write*/
808 }
809 DmaTcdList[5u].Value = State->ExpectedFifoReads; /* iteration count */
810 DmaTcdList[6u].Value = DisHwReq; /* disable hardware request when major loop complete */
811 DmaTcdList[7u].Value = DisHwReq; /* Enable the major loop interrupt at the end of the transfer sequence (i.e. when DisHwReq = 1u) */
812
813 /* Update software RX DMA TCD Scatter Gather */
814 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->RxDmaChannel, State->PhyUnitConfig->RxDmaFastSGId[TCDSGIndex], DmaTcdList, 8u);
815 }
816
817 /**
818 * @brief This function will configure TCD Scatter Gather for the CMD DMA channel
819 */
820 static void Spi_Ip_CmdDmaTcdSGConfig( uint8 Instance,
821 uint8 TCDSGId,
822 uint32 CmdAdd,
823 uint16 Iter,
824 uint8 DisHwReq
825 )
826 {
827 const Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
828 Dma_Ip_LogicChannelTransferListType DmaTcdList[3u];
829
830 DmaTcdList[0u].Param = DMA_IP_CH_SET_SOURCE_ADDRESS;
831 DmaTcdList[1u].Param = DMA_IP_CH_SET_MAJORLOOP_COUNT;
832 DmaTcdList[2u].Param = DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST;
833
834 /* configure TCD Scatter Gather for Cmd DMA channel */
835 DmaTcdList[0u].Value = CmdAdd; /* src address read */
836 DmaTcdList[1u].Value = Iter; /* iteration count */
837 DmaTcdList[2u].Value = DisHwReq; /* disable hardware request when major loop complete */
838 /* Update software TCD Scatter Gather */
839 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->TxCmdDmaChannel, TCDSGId, DmaTcdList, 3u);
840 }
841
842 #endif /* (SPI_IP_ENABLE_DMAFASTTRANSFER_SUPPORT == STD_ON) */
843
844 /**
845 * @brief This function will configure the hardware TCD for the CMD DMA channel
846 * according to the current transfer configuration.
847 *
848 * @param[in] State State of the current transfer
849 * @return void
850 */
851 static void Spi_Ip_DmaCmdConfigAndStart(const Spi_Ip_StateStructureType* State)
852 {
853 Dma_Ip_LogicChannelTransferListType Tcds[4u] = {
854 [0] = { DMA_IP_CH_SET_SOURCE_ADDRESS , (Dma_Ip_uintPtrType)&State->PushrCmds[0] },
855 [1] = { DMA_IP_CH_SET_MAJORLOOP_COUNT , State->Pushr0Repeat },
856 [2] = { DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST , 1u},
857 [3] = { DMA_IP_CH_SET_DESTINATION_SIGNED_LAST_ADDR_ADJ , 0u}
858 };
859
860 /*
861 * State->Pushr0Repeat will always be less than SPI_IP_DMA_MAX_ITER_CNT_U16 because DMA
862 * slave is not implemented.
863 * For master, the maximum value can be MAX_U16(FFFF)/MAX_DTCP(7FF) = 0x20
864 */
865
867 if (State->NbCmds > 1u)
868 {
869 Tcds[2].Value = 0u; /* Enable HW Request */
870 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->TxCmdDmaChannel,
871 State->PhyUnitConfig->TxCmdDmaSGId[0],
872 Tcds,
873 3u);
874 /* Load software TCD Scatter Gather 0 into hardware TCD with ESG bit already set
875 * in software TCD */
876 (void)Dma_Ip_SetLogicChannelScatterGatherConfig(State->PhyUnitConfig->TxCmdDmaChannel, State->PhyUnitConfig->TxCmdDmaSGId[0u]);
877
878 Tcds[0].Value = (Dma_Ip_uintPtrType)&State->PushrCmds[1]; /* SOURCE_ADDRESS */
879 Tcds[1].Value = (uint32)State->NbCmds - 1u; /* MAJORLOOP */
880 Tcds[2].Value = 1u; /* Disable HW Request */
881 Tcds[3].Param = DMA_IP_CH_SET_SOURCE_SIGNED_OFFSET;
882 Tcds[3].Value = 2;
883 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->TxCmdDmaChannel,
884 State->PhyUnitConfig->TxCmdDmaSGId[1],
885 Tcds,
886 4u);
887 }
888 else
889 {
890 Tcds[2].Value = 1u; /* Disable HW Request */
891 (void)Dma_Ip_SetLogicChannelTransferList(State->PhyUnitConfig->TxCmdDmaChannel,
892 Tcds, 4u);
893 }
894
895 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->TxCmdDmaChannel,
896 DMA_IP_CH_SET_HARDWARE_REQUEST);
897 }
898
899 /**
900 * @brief This function will configure the hardware TCDs for the TX and RX DMA channels
901 * according to the current transfer configuration.
902 *
903 * @param[in] State State of the current transfer
904 * @param[in] Base Base address of the instance regs to be updated
905 * @return void
906 */
907 static void Spi_Ip_DmaConfig(const Spi_Ip_StateStructureType* State, const SPI_Type *Base)
908 {
909 Dma_Ip_LogicChannelTransferListType DmaTcdList[11u];
910
911 /* configure TX DMA channel */
912 DmaTcdList[0u].Param = DMA_IP_CH_SET_SOURCE_ADDRESS;
913 DmaTcdList[1u].Param = DMA_IP_CH_SET_DESTINATION_ADDRESS;
914 DmaTcdList[2u].Param = DMA_IP_CH_SET_SOURCE_SIGNED_OFFSET;
915 DmaTcdList[3u].Param = DMA_IP_CH_SET_SOURCE_TRANSFER_SIZE;
916 DmaTcdList[4u].Param = DMA_IP_CH_SET_DESTINATION_TRANSFER_SIZE;
917 DmaTcdList[5u].Param = DMA_IP_CH_SET_MINORLOOP_SIZE;
918 DmaTcdList[6u].Param = DMA_IP_CH_SET_DESTINATION_SIGNED_OFFSET;
919 DmaTcdList[7u].Param = DMA_IP_CH_SET_CONTROL_DIS_AUTO_REQUEST;
920 DmaTcdList[8u].Param = DMA_IP_CH_SET_SOURCE_MODULO;
921
922 DmaTcdList[1u].Value = (Dma_Ip_uintPtrType)&Base->PUSHR.FIFO.TX; /* dest address write*/
923 if(State->ExternalDevice->DeviceParams->FrameSize < 9u)
924 {
925 DmaTcdList[2u].Value = 1u; /* src offset is 1 byte */
926 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte src transfer size */
927 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte dest transfer size */
928 DmaTcdList[5u].Value = 1u; /* bytes to transfer for each request */
929 }
930 else
931 {
932 DmaTcdList[2u].Value = 2u; /* src offset is 2 bytes */
933 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes src transfer size */
934 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes dest transfer size */
935 DmaTcdList[5u].Value = 2u; /* bytes to transfer for each request */
936 }
937 DmaTcdList[6u].Value = 0u; /* no dest offset */
938 DmaTcdList[7u].Value = 1u; /* disable hardware request when major loop complete */
939 DmaTcdList[8u].Value = 0u; /* no src address modulo */
940 if(NULL_PTR == State->TxBuffer)
941 {
942 /* send default data */
943 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)&State->ExternalDevice->DeviceParams->DefaultData; /* src address read */
944 if(State->ExternalDevice->DeviceParams->FrameSize < 17u)
945 {
946 DmaTcdList[2u].Value = 0u; /* src offset is 0 byte */
947 }
948 else
949 {
950 DmaTcdList[8u].Value = 2u; /* data is 4 bytes, src address modulo is 2 bits */
951 }
952 }
953 else
954 {
955 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)State->TxBuffer; /* src address read */
956 }
957 /* Set DESTINATION_SIGNED_LAST_ADDR_ADJ = 0 to avoid the case it still stored from previous TCD Scatter Gather */
958 DmaTcdList[9u].Param = DMA_IP_CH_SET_DESTINATION_SIGNED_LAST_ADDR_ADJ;
959 DmaTcdList[9u].Value = 0u; /* No adjust DADD when major loop completed */
960 /* write TCD for TX DMA channel */
961 (void)Dma_Ip_SetLogicChannelTransferList(State->PhyUnitConfig->TxDmaChannel, DmaTcdList, 10u);
962
963 /* configure RX DMA channel */
964 DmaTcdList[0u].Value = (Dma_Ip_uintPtrType)&Base->POPR; /* src address read */
965 DmaTcdList[2u].Value = 0u; /* no src offset */
966 if(State->ExternalDevice->DeviceParams->FrameSize < 9u)
967 {
968 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte src transfer size */
969 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_1_BYTE; /* 1 byte dest transfer size */
970 DmaTcdList[5u].Value = 1u; /* 1 byte to transfer for each request */
971 DmaTcdList[6u].Value = 1u; /* dest offset is 1 bytes */
972 }
973 else if (State->ExternalDevice->DeviceParams->FrameSize < 17u)
974 {
975 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes src transfer size */
976 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_2_BYTE; /* 2 bytes dest transfer size */
977 DmaTcdList[5u].Value = 2u; /* 2 bytes to transfer for each request */
978 DmaTcdList[6u].Value = 2u; /* dest offset is 2 bytes */
979 }
980 else
981 {
982 DmaTcdList[3u].Value = DMA_IP_TRANSFER_SIZE_4_BYTE; /* 4 bytes src transfer size */
983 DmaTcdList[4u].Value = DMA_IP_TRANSFER_SIZE_4_BYTE; /* 4 bytes dest transfer size */
984 DmaTcdList[5u].Value = 4u; /* 4 bytes to transfer for each request */
985 DmaTcdList[6u].Value = 4u; /* dest offset is 4 bytes */
986 }
987 if(NULL_PTR == State->RxBuffer)
988 {
989 /* Discard data */
990 DmaTcdList[1u].Value = (Dma_Ip_uintPtrType)&Spi_Ip_u32DiscardData; /* dest address write*/
991 DmaTcdList[6u].Value = 0u; /* dest offset is 0 bytes */
992 }
993 else
994 {
995 DmaTcdList[1u].Value = (Dma_Ip_uintPtrType)State->RxBuffer; /* dest address write*/
996 }
997 DmaTcdList[7u].Value = 1u; /* disable hardware request when major loop complete */
998 /* Set DESTINATION_SIGNED_LAST_ADDR_ADJ = 0 to avoid the case it still stored from previous TCD Scatter Gather */
999 DmaTcdList[8u].Param = DMA_IP_CH_SET_DESTINATION_SIGNED_LAST_ADDR_ADJ;
1000 DmaTcdList[8u].Value = 0u; /* No adjust DADD when major loop completed */
1001 /* write TCD for RX DMA channel */
1002 (void)Dma_Ip_SetLogicChannelTransferList(State->PhyUnitConfig->RxDmaChannel, DmaTcdList, 9u);
1003 }
1004
1005 /**
1006 * @brief This function will configure the buffers in the hardware TCDs for the TX and RX DMA channels
1007 * and trigger the transfer.
1008 *
1009 * @param[in] State State of the current transfer
1010 * @return void
1011 */
1012 static void Spi_Ip_DmaTxRxUpdateLoop(Spi_Ip_StateStructureType* State)
1013 {
1014 uint16 NumberDmaIterWrite = State->ExpectedFifoWrites - State->TxIndex;
1015 uint16 NumberDmaIterRead;
1016
1017 Dma_Ip_LogicChannelTransferListType Tcd = {
1018 .Param = DMA_IP_CH_SET_MAJORLOOP_COUNT
1019 };
1020
1021 /* Limits number of major count */
1022 if (SPI_IP_DMA_MAX_ITER_CNT_U16 < NumberDmaIterWrite)
1023 {
1024 NumberDmaIterWrite = SPI_IP_DMA_MAX_ITER_CNT_U16;
1025 }
1026
1027 NumberDmaIterRead = NumberDmaIterWrite;
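    /* Clarifying note (derived from the TX/RX accesses configured by Spi_Ip_DmaConfig): for frames wider
     * than 16 bits each frame needs two 16-bit writes to PUSHR but only one 32-bit read from POPR, so
     * the RX major loop count is half the TX count. */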
1028 if (State->ExternalDevice->DeviceParams->FrameSize > 16u)
1029 {
1030 NumberDmaIterRead = NumberDmaIterWrite/2u;
1031 if (0u != (NumberDmaIterWrite%2u))
1032 {
1033 NumberDmaIterWrite = NumberDmaIterRead;
1034 }
1035 }
1036 State->TxIndex += NumberDmaIterWrite;
1037 State->RxIndex += NumberDmaIterRead;
1038
1039 Tcd.Value = NumberDmaIterWrite;
1040 (void)Dma_Ip_SetLogicChannelTransferList(State->PhyUnitConfig->TxDmaChannel, &Tcd, 1u);
1041
1042 Tcd.Value = NumberDmaIterRead;
1043 (void)Dma_Ip_SetLogicChannelTransferList(State->PhyUnitConfig->RxDmaChannel, &Tcd, 1u);
1044
1045 /* Enable HW request for RX DMA channel before TX DMA channel */
1046 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->RxDmaChannel, DMA_IP_CH_SET_HARDWARE_REQUEST);
1047 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->TxDmaChannel, DMA_IP_CH_SET_HARDWARE_REQUEST);
1048 }
1049
1050 /**
1051 * @brief Configures and starts a DMA transfer
1052 *
1053 * @param[in] State State of the current transfer
1054 * @param[in] Base Base address of the instance regs to be updated
1055 * @param[in] IrqEn Enable DMA major loop interrupt.
1056 * @return void
1057 */
1058 static void Spi_Ip_DmaAsyncStart(Spi_Ip_StateStructureType* State, SPI_Type *Base, uint32 IrqEn)
1059 {
1060 Dma_Ip_LogicChannelTransferListType Tcd = {
1061 .Param = DMA_IP_CH_SET_CONTROL_EN_MAJOR_INTERRUPT,
1062 .Value = IrqEn
1063 };
1064
1065 (void)Dma_Ip_SetLogicChannelTransferList(State->PhyUnitConfig->RxDmaChannel, &Tcd, 1u);
1066 #if (SPI_IP_SLAVE_SUPPORT == STD_ON)
1067 if ((boolean)FALSE == State->PhyUnitConfig->SlaveMode)
1068 #endif
1069 {
1070 Spi_Ip_DmaCmdConfigAndStart(State);
1071 }
1072 /* Config RX and TX channels */
1073 Spi_Ip_DmaConfig(State, Base);
1074 /* Enable RX and TX channels */
1075 Spi_Ip_DmaTxRxUpdateLoop(State);
1076
1077 /* Enable SPI interrupts and set them to trigger DMA */
1078 Base->RSER = SPI_RSER_CMDFFF_RE(1) | SPI_RSER_CMDFFF_DIRS(1) |
1079 SPI_RSER_TFFF_RE(1) | SPI_RSER_TFFF_DIRS(1) |
1080 SPI_RSER_RFDF_RE(1) | SPI_RSER_RFDF_DIRS(1);
1081 }
1082
1083 #endif /*#if (SPI_IP_DMA_USED == STD_ON)*/
1084
1085 /**
1086 * @brief This function will write data to TX FIFO.
1087 * @details This function will write data to TX FIFO.
1088 *
1089 * @param[in] NumberOfWrites Number of data entries to be written to the TX FIFO.
1090 * @param[in] Instance Index of the hardware instance.
1091 * @return none
1092 */
1093 static void Spi_Ip_WriteTxFifo
1094 (
1095 uint16 NumberOfWrites,
1096 uint8 Instance
1097 )
1098 {
1099 Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
1100 SPI_Type* Base = Spi_Ip_apxBases[Instance];
1101 uint32 Idx;
1102 uint32 LastIdx;
1103 uint32 Factor;
1104 uint16 Count;
1105 uint16 Data = 0u;
1106
1107 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
1108 DevAssert(NumberOfWrites <= SPI_IP_FIFO_SIZE_U16);
1109 #endif
1110 if(NULL_PTR != State->TxBuffer)
1111 {
1112 if(State->ExternalDevice->DeviceParams->FrameSize < 9u)
1113 {
1114 Factor = 1u;
1115 }
1116 else
1117 {
1118 Factor = 2u;
1119 }
1120 LastIdx = ((uint32)State->TxIndex + NumberOfWrites) * Factor;
1121 for (Idx = State->TxIndex * Factor; Idx < LastIdx; Idx += Factor)
1122 {
1123 if(State->ExternalDevice->DeviceParams->FrameSize < 9u)
1124 {
1125 Data = State->TxBuffer[Idx];
1126 }
1127 else
1128 {
1129 Data = *((const uint16*)&State->TxBuffer[Idx]);
1130 }
1131 Base->PUSHR.FIFO.TX = Data;
1132 }
1133 }
1134 else
1135 {
1136 for (Count = 0; Count < NumberOfWrites; Count++)
1137 {
1138 if(State->ExternalDevice->DeviceParams->FrameSize > 16u)
1139 {
1140 /* get the first or second 16-bit word of the default data based on Count */
1141 Factor = (uint16)((Count % 2u) * 16u);
1142 Data = (uint16)(State->ExternalDevice->DeviceParams->DefaultData >> Factor);
1143 }
1144 else
1145 {
1146 Data = (uint16)State->ExternalDevice->DeviceParams->DefaultData;
1147 }
1148 Base->PUSHR.FIFO.TX = Data;
1149 }
1150 }
1151 State->TxIndex += NumberOfWrites;
1152 }
1153
1154 /**
1155 * @brief This function will read data from RX FIFO in Asynchronous mode.
1156 * @details This function will read data from RX FIFO in Asynchronous mode.
1157 *
1158 * @param[in] NumberOfReads Number of entries to be read from the RX FIFO.
1159 * @param[in] Instance Index of the hardware instance.
1160 * @return none
1161 */
1162 static void Spi_Ip_ReceiveData
1163 (
1164 uint16 NumberOfReads,
1165 uint8 Instance
1166 )
1167 {
1168 Spi_Ip_StateStructureType* const State = Spi_Ip_apxStateStructureArray[Instance];
1169 const SPI_Type* Base = Spi_Ip_apxBases[Instance];
1170 uint16 LimitedNumberOfReads = NumberOfReads;
1171 uint32 Data;
1172 uint32 Idx;
1173 uint32 LastIdx;
1174 uint32 Factor;
1175
1176 if (State->ExternalDevice->DeviceParams->FrameSize < 9u)
1177 {
1178 Factor = 1u;
1179 }
1180 else if (State->ExternalDevice->DeviceParams->FrameSize < 17u)
1181 {
1182 Factor = 2u;
1183 }
1184 else
1185 {
1186 Factor = 4u;
1187 }
1188
1189 /* Limits to remaining frames. */
1190 if (LimitedNumberOfReads > (State->ExpectedFifoReads - State->RxIndex))
1191 {
1192 LimitedNumberOfReads = State->ExpectedFifoReads - State->RxIndex;
1193 }
1194 if(NULL_PTR != State->RxBuffer)
1195 {
1196 LastIdx = ((uint32)State->RxIndex + LimitedNumberOfReads) * Factor;
1197 for (Idx = State->RxIndex * Factor; Idx < LastIdx; Idx += Factor)
1198 {
1199 Data = Base->POPR;
1200 if (State->ExternalDevice->DeviceParams->FrameSize < 9u)
1201 {
1202 State->RxBuffer[Idx] = (uint8)Data;
1203 }
1204 else if (State->ExternalDevice->DeviceParams->FrameSize < 17u)
1205 {
1206 *((uint16*)&State->RxBuffer[Idx]) = (uint16)Data;
1207 }
1208 else
1209 {
1210 *((uint32*)&State->RxBuffer[Idx]) = Data;
1211 }
1212 }
1213 }
1214 else
1215 {
1216 /* Discard data */
1217 for (Idx = 0; Idx < (uint32)LimitedNumberOfReads; Idx++)
1218 {
1219 (void)Base->POPR;
1220 }
1221 }
1222 State->RxIndex += LimitedNumberOfReads;
1223
1224 /* Update current TX FIFO slot */
1225 if (State->ExternalDevice->DeviceParams->FrameSize > 16u)
1226 {
1227 State->CurrentTxFifoSlot += LimitedNumberOfReads * 2u;
1228 }
1229 else
1230 {
1231 State->CurrentTxFifoSlot += LimitedNumberOfReads;
1232 }
1233 }
1234
1235 /**
1236 * @brief This function will finish transfer of a channel.
1237 * @details This function will finish transfer of a channel.
1238 *
1239 * @param[in] Instance Index of the hardware instance.
1240 * @param[in] ErrorFlag Save the status of transfer error flags
1241 * @return void
1242 */
1243 static void Spi_Ip_ChannelFinished(uint8 Instance, boolean ErrorFlag)
1244 {
1245 Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
1246 Spi_Ip_EventType EventState = SPI_IP_EVENT_FAULT;
1247
1248 if (TRUE == ErrorFlag)
1249 {
1250 State->Status = SPI_IP_FAULT;
1251 EventState = SPI_IP_EVENT_FAULT;
1252 }
1253 else
1254 {
1255 State->Status = SPI_IP_IDLE;
1256 EventState = SPI_IP_EVENT_END_TRANSFER;
1257 }
1258
1259 if (NULL_PTR != State->Callback)
1260 {
1261 State->Callback(Instance, EventState);
1262 }
1263 else
1264 {
1265 (void)EventState;
1266 }
1267 }
1268
1269 /**
1270 * @brief Write to FIFOs to start Async Transfer
1271 * @details Write TX and TXCMD
1272 *
1273 * @param[in] State State of the current transfer
1274 * @param[in] Base Base address of the instance regs to be updated
1275 * @return void
1276 */
1277 static void Spi_Ip_AsyncStart(Spi_Ip_StateStructureType* State, SPI_Type *Base)
1278 {
1279 #if (SPI_IP_SLAVE_SUPPORT == STD_ON)
1280 uint16 NumberOfWrites = 0u;
1281 #endif
1282 uint16 RemainingWrites = State->ExpectedFifoWrites - State->TxIndex;
1283
1284 /* Enable interrupts. */
1285 Base->RSER = SPI_RSER_TCF_RE(1) | SPI_RSER_TFUF_RE(1) | SPI_RSER_RFOF_RE(1);
1286 /* Fill data into TX FIFO to trigger TCF interrupt */
1287 /* Write to CMD field first*/
1288 #if (SPI_IP_SLAVE_SUPPORT == STD_ON)
1289 if ((boolean)FALSE == State->PhyUnitConfig->SlaveMode)
1290 #endif
1291 {
1292 (void)Spi_Ip_WriteCmdFifo(State, Base);
1293 /* Limits to remaining frames. */
1294 if (State->CurrentTxFifoSlot > RemainingWrites)
1295 {
1296 State->CurrentTxFifoSlot = RemainingWrites;
1297 }
1298 if (State->CurrentTxFifoSlot != 0u)
1299 {
1300 Spi_Ip_WriteTxFifo(State->CurrentTxFifoSlot, State->ExternalDevice->Instance);
1301 /* Reset the number of TX FIFO slots that can be written */
1302 State->CurrentTxFifoSlot = 0u;
1303 }
1304 }
1305 #if (SPI_IP_SLAVE_SUPPORT == STD_ON)
1306 else
1307 {
1308 /* In slave mode, the TX FIFO is filled completely before the master device starts the transmission */
1309 NumberOfWrites = (uint16)((Base->SR) & SPI_SR_TXCTR_MASK) >> SPI_SR_TXCTR_SHIFT;
1310 if (NumberOfWrites < SPI_IP_FIFO_SIZE_U16)
1311 {
1312 NumberOfWrites = SPI_IP_FIFO_SIZE_U16 - NumberOfWrites;
1313 }
1314 else
1315 {
1316 NumberOfWrites = 0u;
1317 }
1318 /* Limits to remaining frames. */
1319 if (NumberOfWrites > RemainingWrites)
1320 {
1321 NumberOfWrites = RemainingWrites;
1322 }
1323 /* Write to TXFIFO*/
1324 Spi_Ip_WriteTxFifo(NumberOfWrites, State->ExternalDevice->Instance);
1325 /* Update the number of TX FIFO slots still available */
1326 State->CurrentTxFifoSlot -= NumberOfWrites;
1327 }
1328 #endif
1329 }
1330
1331 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
1332 /**
1333 * @brief This function will verify the validity of some input parameters of the transmission functions.
1334 * @details This function will verify the validity of some input parameters of the transmission functions.
1335 *
1336 * @param[in] ExternalDevice Pointer to the external device where data is transmitted.
1337 * @param[in] Length Number of bytes to be sent.
1338 * @return void
1339 * Parameter violations are reported via DevAssert.
1340 */
1341 static void Spi_Ip_CheckValidParameters
1342 (
1343 const Spi_Ip_ExternalDeviceType *ExternalDevice,
1344 uint16 Length,
1345 const uint8 *TxBuffer,
1346 const uint8 *RxBuffer,
1347 uint32 TimeOut
1348 )
1349 {
1350 DevAssert(ExternalDevice != NULL_PTR);
1351 DevAssert(0u != Length);
1352 DevAssert(0u != TimeOut);
1353 if (ExternalDevice->DeviceParams->FrameSize > 16u)
1354 {
1355 DevAssert((Length % 4u) == 0u);
1356 }
1357 else if (ExternalDevice->DeviceParams->FrameSize > 8u)
1358 {
1359 DevAssert((Length % 2u) == 0u);
1360 }
1361 else
1362 {
1363 /* do nothing */
1364 }
1365 DevAssert(Spi_Ip_apxStateStructureArray[ExternalDevice->Instance] != NULL_PTR);
1366 #if ((CPU_TYPE == CPU_TYPE_64) && (SPI_IP_DMA_USED == STD_ON))
1367 if(Spi_Ip_apxStateStructureArray[ExternalDevice->Instance]->PhyUnitConfig->DmaUsed)
1368 {
1369 /* On DMA versions < 5 only 32-bit addresses are supported. */
1370 /* This SPI IP is not used on any platform with a DMA version > 5. */
1371 DevAssert((((uint64)(Dma_Ip_uintPtrType)TxBuffer) >> 32u) == 0u);
1372 DevAssert((((uint64)(Dma_Ip_uintPtrType)RxBuffer) >> 32u) == 0u);
1373 }
1374 #else
1375 (void)TxBuffer;
1376 (void)RxBuffer;
1377 #endif
1378 }
1379 #endif
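/* Illustrative note on the Length alignment checks above: they follow from the byte packing used by the
   driver. For example, with FrameSize = 24 bits each frame occupies 4 buffer bytes, so Length must be a
   multiple of 4; with FrameSize = 12 bits each frame occupies 2 bytes, so Length must be a multiple of 2;
   with FrameSize of 8 bits or less, any Length is accepted. */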
1380 /*==================================================================================================
1381 * GLOBAL FUNCTIONS
1382 ==================================================================================================*/
1383 #if (SPI_IP_DMA_USED == STD_ON)
1384 /**
1385 * @brief This function will process the DMA transfer complete interrupt.
1386 * @details This function either continues the transfer or completes it, depending on the DMA transfer progress.
1387 *
1388 * @param[in] Instance Index of the hardware instance.
1389 * @return void
1390 * @implements Spi_Ip_IrqDmaHandler_Activity
1391 */
1392 void Spi_Ip_IrqDmaHandler(uint8 Instance)
1393 {
1394 SPI_Type* Base = Spi_Ip_apxBases[Instance];
1395 Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
1396 uint32 SrStatusRegister = 0u;
1397 boolean ErrorFlag = (boolean)FALSE;
1398 boolean EndOfTransferFlag = (boolean)FALSE;
1399
1400 if((NULL_PTR != State) && (SPI_IP_BUSY == State->Status))
1401 {
1402 /* Read Status and clear all flags. */
1403 SrStatusRegister = Base->SR;
1404 Base->SR &= 0xFFFF0000u;
1405
1406 if ( (0u != (SrStatusRegister & (SPI_SR_RFOF_MASK | SPI_SR_TFUF_MASK))) )
1407 {
1408 /* mark error flag */
1409 ErrorFlag = (boolean)TRUE;
1410 }
1411 else
1412 {
1413 if (State->ExpectedFifoReads != State->RxIndex)
1414 {
1415 Spi_Ip_DmaTxRxUpdateLoop(State);
1416 }
1417 else
1418 {
1419 EndOfTransferFlag = (boolean)TRUE;
1420 }
1421 }
1422
1423 if (((boolean)TRUE == EndOfTransferFlag) || ((boolean)TRUE == ErrorFlag))
1424 {
1425 /* Only set the HALT bit to stop the transfer when there is an error or a request to de-assert CS at the end of the transfer sequence */
1426 if(((boolean)TRUE == ErrorFlag) || ((boolean)FALSE == State->KeepCs))
1427 {
1428 SchM_Enter_Spi_SPI_EXCLUSIVE_AREA_13();
1429 /* Stop Transfer */
1430 Base->MCR |= SPI_MCR_HALT_MASK;
1431 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_13();
1432 }
1433 /* Disable interrupts and DMA requests. */
1434 Base->RSER = 0U;
1435
1436 if((boolean)TRUE == ErrorFlag)
1437 {
1438 State->Status = SPI_IP_FAULT;
1439 }
1440 else
1441 {
1442 State->Status = SPI_IP_IDLE;
1443 }
1444 if (State->Callback != NULL_PTR)
1445 {
1446 if((boolean)TRUE == ErrorFlag)
1447 {
1448 State->Callback(Instance, SPI_IP_EVENT_FAULT);
1449 }
1450 else
1451 {
1452 State->Callback(Instance, SPI_IP_EVENT_END_TRANSFER);
1453 }
1454 }
1455 }
1456 }
1457 else
1458 {
1459 /* Driver is not initialized or no transfer is in progress */
1460 /* nothing to do */
1461 }
1462 }
1463 #endif /* (SPI_IP_DMA_USED == STD_ON) */
1464
1465 #if (STD_ON == SPI_IP_ENABLE_USER_MODE_SUPPORT)
1466 /**
1467 * @brief This function will set UAA bit in REG_PROT for SPI unit
1468 */
1469 void Spi_Ip_SetUserAccess(uint8 Instance)
1470 {
1471 SPI_Type* Base = Spi_Ip_apxBases[Instance];
1472
1473 SET_USER_ACCESS_ALLOWED((uint32)Base,SPI_IP_PROT_MEM_U32);
1474 }
1475
1476 /**
1477 * @brief This function will enable writing all SPI registers under protection in User mode by configuring REG_PROT
1478 */
1479 static void Spi_Ip_SetUserAccessAllowed(uint8 Instance)
1480 {
1481 OsIf_Trusted_Call1param(Spi_Ip_SetUserAccess, Instance);
1482 }
1483 #endif /* SPI_IP_ENABLE_USER_MODE_SUPPORT */
1484 /*================================================================================================*/
1485 Spi_Ip_StatusType Spi_Ip_Init(const Spi_Ip_ConfigType *PhyUnitConfigPtr)
1486 {
1487 SPI_Type* Base;
1488 Spi_Ip_StateStructureType* State;
1489 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
1490 uint8 Instance = 0u;
1491
1492 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
1493 DevAssert(PhyUnitConfigPtr != NULL_PTR);
1494 #endif
1495 Instance = PhyUnitConfigPtr->Instance;
1496 State = Spi_Ip_apxStateStructureArray[Instance];
1497 Base = Spi_Ip_apxBases[Instance];
1498 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
1499 DevAssert(State == NULL_PTR);
1500 #endif
1501 Spi_Ip_apxStateStructureArray[Instance] = &Spi_Ip_axStateStructure[PhyUnitConfigPtr->StateIndex];
1502 State = Spi_Ip_apxStateStructureArray[Instance];
1503 State->PhyUnitConfig = PhyUnitConfigPtr;
1504 #if (STD_ON == SPI_IP_ENABLE_USER_MODE_SUPPORT)
1505 Spi_Ip_SetUserAccessAllowed(Instance);
1506 #endif
1507 /* Halt before update other fields */
1508 Base->MCR |= SPI_MCR_HALT_MASK;
1509 Base->MCR = PhyUnitConfigPtr->Mcr;
1510
1511 #if (SPI_IP_DUAL_CLOCK_MODE == STD_ON)
1512 State->ClockMode = SPI_IP_NORMAL_CLOCK;
1513 #endif
1514 State->KeepCs = (boolean)FALSE;
1515 #if (SPI_IP_DMA_USED == STD_ON)
1516 #if (SPI_IP_ENABLE_DMAFASTTRANSFER_SUPPORT == STD_ON)
1517 Spi_Ip_TxDmaTcdSGInit(Instance);
1518 Spi_Ip_RxDmaTcdSGInit(Instance);
1519 #endif
1520 if((boolean)TRUE == State->PhyUnitConfig->DmaUsed)
1521 {
1522 Spi_Ip_CmdDmaTcdSGInit(Instance);
1523 }
1524 #endif
1525 /* set State to idle */
1526 Status = Spi_Ip_UpdateTransferMode(Instance, PhyUnitConfigPtr->TransferMode);
1527 if (SPI_IP_STATUS_SUCCESS == Status)
1528 {
1529 State->Status = SPI_IP_IDLE;
1530 }
1531 return Status;
1532 }
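/*
 * Usage sketch (illustrative only, not part of the driver): initializing one SPI hardware unit and
 * releasing it again. The configuration symbol Spi_Ip_PhyUnitConfig_0 is a hypothetical placeholder for a
 * structure produced by the configuration generator.
 *
 *     if (SPI_IP_STATUS_SUCCESS == Spi_Ip_Init(&Spi_Ip_PhyUnitConfig_0))
 *     {
 *         // ... perform transfers ...
 *         (void)Spi_Ip_DeInit(Spi_Ip_PhyUnitConfig_0.Instance);
 *     }
 */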
1533 /*================================================================================================*/
1534 Spi_Ip_StatusType Spi_Ip_DeInit(uint8 Instance)
1535 {
1536 SPI_Type* Base;
1537 const Spi_Ip_StateStructureType* State;
1538 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
1539
1540 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
1541 DevAssert(Instance < SPI_INSTANCE_COUNT);
1542 #endif
1543 Base = Spi_Ip_apxBases[Instance];
1544 State = Spi_Ip_apxStateStructureArray[Instance];
1545 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
1546 DevAssert(State != NULL_PTR);
1547 #endif
1548 if (State->Status == SPI_IP_BUSY)
1549 {
1550 Status = SPI_IP_STATUS_FAIL;
1551 }
1552 else
1553 {
1554 /* Halt before update other fields */
1555 Base->MCR |= SPI_MCR_HALT_MASK;
1556 /* Reset FIFOs */
1557 Base->MCR |= SPI_MCR_CLR_TXF_MASK | SPI_MCR_CLR_RXF_MASK;
1558 Base->MCR = 0x4001u;
1559 Base->MODE.CTAR[0] = 0x78000000u;
1560 Base->SR = 0xFFFF0000u;
1561 Base->RSER = 0u;
1562 Base->CTARE[0] = 0x1u;
1563
1564 Spi_Ip_apxStateStructureArray[Instance] = NULL_PTR;
1565 }
1566 return Status;
1567 }
1568
1569 static Spi_Ip_StatusType Spi_Ip_IntoBusyState(Spi_Ip_StateStructureType* State)
1570 {
1571 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
1572 SchM_Enter_Spi_SPI_EXCLUSIVE_AREA_09();
1573 if (SPI_IP_BUSY == State->Status)
1574 {
1575 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_09();
1576 Status = SPI_IP_STATUS_FAIL;
1577 }
1578 else
1579 {
1580 /* Mark the hardware as busy. */
1581 State->Status = SPI_IP_BUSY;
1582 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_09();
1583 }
1584 return Status;
1585 }
1586
1587 /* Iteration of reading and writing the FIFO. Returns TRUE if any action was taken during the iteration */
1588 static boolean Spi_Ip_SyncReadWriteStep(Spi_Ip_StateStructureType *State, SPI_Type *Base, uint8 Instance)
1589 {
1590 boolean StepDone = FALSE;
1591 uint16 NumberOfReads;
1592
1593 /* SEND CMD */
1594 if (State->ExpectedCmdFifoWrites != 0u)
1595 {
1596 if (Spi_Ip_WriteCmdFifo(State, Base) != 0u)
1597 {
1598 StepDone = TRUE;
1599 }
1600 }
1601 /* TRANSMIT */
1602
1603 /* Push data until HW fifo is full or transfer is done. */
1604 /* After the driver has read all frames currently in the RX FIFO, frames may still be pending in the TX FIFO. If an interrupt occurs
1605 just before the driver checks how many TX FIFO entries are free, and servicing that interrupt takes longer than shifting out the
1606 remaining TX FIFO frames, the TX FIFO empties while new frames accumulate in the RX FIFO. On return from the interrupt the driver
1607 fills the TX FIFO completely again; if another long interrupt occurs before the RX FIFO is drained, the RX FIFO can overflow.
1608 To avoid this, the total number of frames held in the TX FIFO plus the RX FIFO must stay below the FIFO size.
1609 The State->CurrentTxFifoSlot variable tracks the number of frames "in flight on the bus"; it is always kept below the FIFO size. */
1610
1611 /* Limits to remaining frames. */
1612 if (State->CurrentTxFifoSlot > (State->ExpectedFifoWrites - State->TxIndex))
1613 {
1614 State->CurrentTxFifoSlot = State->ExpectedFifoWrites - State->TxIndex;
1615 }
1616 if (State->CurrentTxFifoSlot != 0u)
1617 {
1618 Spi_Ip_WriteTxFifo(State->CurrentTxFifoSlot, Instance);
1619 /* Update the number of TX FIFO slots that can still be written */
1620 State->CurrentTxFifoSlot = 0u;
1621 StepDone = TRUE;
1622 /* Clear Halt bit, then transfer */
1623 if(((Base->MCR) & SPI_MCR_HALT_MASK) == SPI_MCR_HALT_MASK)
1624 {
1625 Base->MCR &= ~SPI_MCR_HALT_MASK;
1626 }
1627 }
1628 /* RECEIVE */
1629 /* Read all data available in receive HW fifo. */
1630 NumberOfReads = (uint16)(((Base->SR) & SPI_SR_RXCTR_MASK) >> SPI_SR_RXCTR_SHIFT);
1631 if(NumberOfReads != 0u)
1632 {
1633 /* Read data from RX FIFO */
1634 Spi_Ip_ReceiveData(NumberOfReads, Instance);
1635 StepDone = TRUE;
1636 }
1637
1638 return StepDone;
1639 }
1640
1641 /*================================================================================================*/
1642 Spi_Ip_StatusType Spi_Ip_SyncTransmit(
1643 const Spi_Ip_ExternalDeviceType *ExternalDevice,
1644 const uint8 *TxBuffer,
1645 uint8 *RxBuffer,
1646 uint16 Length,
1647 uint32 TimeOut
1648 )
1649 {
1650 SPI_Type *Base;
1651 Spi_Ip_StateStructureType *State;
1652 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
1653 uint32 TimeoutTicks = OsIf_MicrosToTicks(TimeOut, SPI_IP_TIMEOUT_METHOD);
1654 uint32 CurrentTicks = 0u; /* initialize current counter */
1655 uint32 ElapsedTicks = 0u; /* elapsed will give timeout */
1656 uint8 Instance = 0u;
1657 boolean StepDone;
1658
1659 #if (STD_ON == SPI_IP_DEV_ERROR_DETECT)
1660 Spi_Ip_CheckValidParameters(ExternalDevice, Length, TxBuffer, RxBuffer, TimeOut);
1661 #endif
1662 Instance = ExternalDevice->Instance;
1663 State = Spi_Ip_apxStateStructureArray[Instance];
1664
1665 Base = Spi_Ip_apxBases[Instance];
1666 Status = Spi_Ip_IntoBusyState(State);
1667 if (SPI_IP_STATUS_SUCCESS == Status)
1668 {
1669 /* Make sure that the FIFOs are empty before starting a new transfer session */
1670 Base->MCR |= SPI_MCR_CLR_TXF_MASK | SPI_MCR_CLR_RXF_MASK;
1671 /* Reset Flags */
1672 Base->SR = 0xFFFF0000u;
1673
1674 State->TxBuffer = TxBuffer;
1675 State->RxBuffer = RxBuffer;
1676 State->ExternalDevice = ExternalDevice;
1677
1678 Spi_Ip_PrepareTransfer(State, Base, Length);
1679
1680 CurrentTicks = OsIf_GetCounter(SPI_IP_TIMEOUT_METHOD); /* initialize current counter */
1681
1682 /* All TX FIFO slots are currently available. */
1683 State->CurrentTxFifoSlot = SPI_IP_FIFO_SIZE_U16;
1684 while(SPI_IP_STATUS_SUCCESS == Status)
1685 {
1686 /* Read and write FIFO registers */
1687 StepDone = Spi_Ip_SyncReadWriteStep(State, Base, Instance);
1688 if (StepDone)
1689 {
1690 /* Finish the transfer if all frames have been received */
1691 if (State->RxIndex == State->ExpectedFifoReads)
1692 {
1693 break;
1694 }
1695 ElapsedTicks = 0u;
1696 }
1697
1698 /* Check if errors like overflow or underflow are reported in Status register or timeout error */
1699 ElapsedTicks += OsIf_GetElapsed(&CurrentTicks, SPI_IP_TIMEOUT_METHOD);
1700 if (ElapsedTicks >= TimeoutTicks)
1701 {
1702 Status = SPI_IP_TIMEOUT;
1703 }
1704 }
1705 /* Only set the HALT bit to stop the transfer when there is an error or a request to de-assert CS at the end of the transfer sequence */
1706 if((SPI_IP_STATUS_SUCCESS != Status) || ((boolean)FALSE == State->KeepCs))
1707 {
1708 /* Stop transfer */
1709 Base->MCR |= SPI_MCR_HALT_MASK;
1710 }
1711 /* Channel finished */
1712 Spi_Ip_ChannelFinished(Instance, FALSE);
1713 }
1714 return Status;
1715 }
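/*
 * Usage sketch (illustrative only): a blocking 4-byte transfer on an already initialized unit. The external
 * device object Spi_Ip_ExternalDevice_0 and the 1000 us timeout are hypothetical placeholders; with a frame
 * size of 8 bits or less, Length is the plain byte count and needs no 2- or 4-byte alignment.
 *
 *     uint8 Tx[4u] = {0x01u, 0x02u, 0x03u, 0x04u};
 *     uint8 Rx[4u] = {0u, 0u, 0u, 0u};
 *     Spi_Ip_StatusType Status = Spi_Ip_SyncTransmit(&Spi_Ip_ExternalDevice_0, Tx, Rx, 4u, 1000u);
 *     // Status is SPI_IP_STATUS_SUCCESS, SPI_IP_STATUS_FAIL (unit busy) or SPI_IP_TIMEOUT.
 */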
1716
1717 Spi_Ip_StatusType Spi_Ip_AsyncTransmit(
1718 const Spi_Ip_ExternalDeviceType *ExternalDevice,
1719 const uint8 *TxBuffer,
1720 uint8 *RxBuffer,
1721 uint16 Length,
1722 Spi_Ip_CallbackType EndCallback
1723 )
1724 {
1725 SPI_Type* Base;
1726 Spi_Ip_StateStructureType* State;
1727 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
1728
1729 #if (STD_ON == SPI_IP_DEV_ERROR_DETECT)
1730 Spi_Ip_CheckValidParameters(ExternalDevice, Length, TxBuffer, RxBuffer, 1u);
1731 #endif
1732
1733 State = Spi_Ip_apxStateStructureArray[ExternalDevice->Instance];
1734 Base = Spi_Ip_apxBases[ExternalDevice->Instance];
1735 Status = Spi_Ip_IntoBusyState(State);
1736 if (SPI_IP_STATUS_SUCCESS == Status)
1737 {
1738 /* Update State structure. */
1739 State->TxBuffer = TxBuffer;
1740 State->RxBuffer = RxBuffer;
1741 State->Callback = EndCallback;
1742
1743 State->CurrentTxFifoSlot = SPI_IP_FIFO_SIZE_U16;
1744 State->ExternalDevice = ExternalDevice;
1745
1746 Spi_Ip_PrepareTransfer(State, Base, Length);
1747 /* Make sure that the FIFOs are empty before starting a new transfer session */
1748 Base->MCR |= SPI_MCR_CLR_TXF_MASK | SPI_MCR_CLR_RXF_MASK;
1749 /* Clear all flags. */
1750 Base->SR &= 0xFFFF0000u;
1751
1752 #if (SPI_IP_DMA_USED == STD_ON)
1753 if((boolean)FALSE == State->PhyUnitConfig->DmaUsed)
1754 #endif
1755 {
1756 if (State->TransferMode == SPI_IP_POLLING)
1757 {
1758 /* Disable interrupts and DMA requests. */
1759 Base->RSER = 0U;
1760 } else {
1761 /* Interrupt mode */
1762 Spi_Ip_AsyncStart(State, Base);
1763 }
1764 }
1765 #if (SPI_IP_DMA_USED == STD_ON)
1766 else
1767 {
1768 Spi_Ip_DmaAsyncStart(State, Base, (State->TransferMode == SPI_IP_POLLING) ? 0u : 1u);
1769 }
1770 #endif
1771 SchM_Enter_Spi_SPI_EXCLUSIVE_AREA_11();
1772 /* Clear Halt bit before transfer */
1773 Base->MCR &= ~SPI_MCR_HALT_MASK;
1774 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_11();
1775 }
1776 return Status;
1777 }
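/*
 * Usage sketch (illustrative only): a non-blocking transfer with an end-of-transfer notification. The
 * callback is invoked as State->Callback(Instance, Event) above; its exact prototype is given by
 * Spi_Ip_CallbackType in the header, and Spi_Ip_EventType is assumed here as the name of the event
 * parameter type. App_SpiNotification, App_SpiDone and Spi_Ip_ExternalDevice_0 are hypothetical names.
 *
 *     static void App_SpiNotification(uint8 Instance, Spi_Ip_EventType Event)
 *     {
 *         // SPI_IP_EVENT_END_TRANSFER on success, SPI_IP_EVENT_FAULT on FIFO under-/overflow
 *         App_SpiDone[Instance] = (SPI_IP_EVENT_END_TRANSFER == Event) ? TRUE : FALSE;
 *     }
 *
 *     (void)Spi_Ip_AsyncTransmit(&Spi_Ip_ExternalDevice_0, Tx, Rx, 4u, &App_SpiNotification);
 */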
1778
1779 #if ((SPI_IP_DMA_USED == STD_ON) && (SPI_IP_ENABLE_DMAFASTTRANSFER_SUPPORT == STD_ON))
1780 Spi_Ip_StatusType Spi_Ip_AsyncTransmitFast(
1781 const Spi_Ip_FastTransferType *FastTransferCfg,
1782 uint8 NumberOfTransfer,
1783 Spi_Ip_CallbackType EndCallback
1784 )
1785 {
1786 SPI_Type* Base;
1787 Spi_Ip_StateStructureType* State;
1788 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
1789 uint8 Instance = 0u;
1790 uint32 LsbValue = 0u;
1791 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
1792 uint8 Count = 0u;
1793
1794 DevAssert(NULL_PTR != FastTransferCfg);
1795 DevAssert(NULL_PTR != FastTransferCfg[0u].ExternalDevice);
1796
1797 State = Spi_Ip_apxStateStructureArray[FastTransferCfg[0u].ExternalDevice->Instance];
1798
1799 DevAssert(NULL_PTR != State);
1800 DevAssert(State->TransferMode == SPI_IP_INTERRUPT);
1801 DevAssert(NumberOfTransfer <= State->PhyUnitConfig->MaxNumOfFastTransfer);
1802
1803 for(Count = 0u; Count < NumberOfTransfer; Count++)
1804 {
1805 DevAssert(NULL_PTR != FastTransferCfg[Count].ExternalDevice);
1806 DevAssert(0u != FastTransferCfg[Count].Length);
1807 if (FastTransferCfg[0u].ExternalDevice->DeviceParams->FrameSize > 16u)
1808 {
1809 DevAssert((FastTransferCfg[Count].Length % 4u) == 0u);
1810 }
1811 else if (FastTransferCfg[0u].ExternalDevice->DeviceParams->FrameSize > 8u)
1812 {
1813 DevAssert((FastTransferCfg[Count].Length % 2u) == 0u);
1814 }
1815 else
1816 {
1817 DevAssert(SPI_IP_DMA_MAX_ITER_CNT_U16 >= FastTransferCfg[Count].Length);
1818 }
1819 }
1820 #endif
1821 Instance = FastTransferCfg[0u].ExternalDevice->Instance;
1822 Base = Spi_Ip_apxBases[Instance];
1823 State = Spi_Ip_apxStateStructureArray[Instance];
1824
1825 SchM_Enter_Spi_SPI_EXCLUSIVE_AREA_14();
1826 if (SPI_IP_BUSY == State->Status)
1827 {
1828 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_14();
1829 Status = SPI_IP_STATUS_FAIL;
1830 }
1831 else
1832 {
1833 /* Mark the hardware as busy. */
1834 State->Status = SPI_IP_BUSY;
1835 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_14();
1836
1837 /* Make sure that the FIFOs are empty before starting a new transfer session */
1838 Base->MCR |= SPI_MCR_CLR_TXF_MASK | SPI_MCR_CLR_RXF_MASK;
1839 /* Clear all flags. */
1840 Base->SR &= 0xFFFF0000u;
1841
1842 /* Update State structure. */
1843 /* For DMA fast transfer, all transfers use the same HW unit and master mode only. Only a few parameters, such as continuous CS and PCS, may differ between transfers.
1844 Parameters such as baud rate, clock polarity, clock phase, delay timing configuration, bit order and frame size
1845 must be the same for all transfers, so make sure they are configured identically in each external device allocated for DMA fast transfers.
1846 All of those attributes are therefore taken from the first transfer in FastTransferCfg */
1847 State->ExternalDevice = FastTransferCfg[0u].ExternalDevice;
1848 /* Get Lsb value */
1849 LsbValue = State->ExternalDevice->DeviceParams->Lsb ? 1UL : 0UL;
1850 State->Callback = EndCallback;
1851 /* Stop the transfer at the end of the transfer sequence by setting the HALT bit in Spi_Ip_IrqDmaHandler */
1852 State->KeepCs = (boolean)FALSE;
1853
1854 /* Configure external device parameters like: frame size, clock phase, clock polarity. */
1855 #if (SPI_IP_DUAL_CLOCK_MODE == STD_ON)
1856 Base->MODE.CTAR[0u] = FastTransferCfg[0u].ExternalDevice->Ctar[State->ClockMode] | SPI_CTAR_FMSZ(((uint32)State->ExternalDevice->DeviceParams->FrameSize - 1u) & 0xFu) | SPI_CTAR_LSBFE(LsbValue);
1857 #else
1858 Base->MODE.CTAR[0u] = FastTransferCfg[0u].ExternalDevice->Ctar | SPI_CTAR_FMSZ(((uint32)State->ExternalDevice->DeviceParams->FrameSize - 1u) & (uint32)0xFu) | SPI_CTAR_LSBFE(LsbValue);
1859 #endif
1860 Base->CTARE[0u] = FastTransferCfg[0u].ExternalDevice->Ctare | SPI_CTARE_FMSZE((((uint32)State->ExternalDevice->DeviceParams->FrameSize - 1u) >> 4u) & (uint32)0x1u);
1861 /* Config and Enable DMA request. */
1862 Spi_Ip_DmaFastConfig(Instance, FastTransferCfg, NumberOfTransfer);
1863 Base->RSER = SPI_RSER_CMDFFF_RE(1u) | SPI_RSER_CMDFFF_DIRS(1u) | SPI_RSER_TFFF_RE(1u) | SPI_RSER_TFFF_DIRS(1u) | SPI_RSER_RFDF_RE(1u) | SPI_RSER_RFDF_DIRS(1u);
1864
1865 /* Clear Halt bit before transfer */
1866 Base->MCR &= ~SPI_MCR_HALT_MASK;
1867 }
1868 return Status;
1869 }
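/*
 * Usage sketch (illustrative only): chaining two DMA fast transfers on the same unit in interrupt mode.
 * The structure fields shown (ExternalDevice, TxBuffer, RxBuffer, Length, DefaultData, KeepCs) are the ones
 * referenced by the driver above; the buffer and device names are hypothetical, and both entries must use
 * external devices configured with identical timing attributes, as noted in Spi_Ip_AsyncTransmitFast.
 *
 *     Spi_Ip_FastTransferType Fast[2u];
 *     Fast[0u].ExternalDevice = &Spi_Ip_ExternalDevice_0;
 *     Fast[0u].TxBuffer = Tx0;  Fast[0u].RxBuffer = Rx0;  Fast[0u].Length = 8u;
 *     Fast[0u].DefaultData = 0xFFu;  Fast[0u].KeepCs = (boolean)TRUE;   // keep CS asserted for the next transfer
 *     Fast[1u] = Fast[0u];
 *     Fast[1u].TxBuffer = Tx1;  Fast[1u].RxBuffer = Rx1;  Fast[1u].Length = 16u;
 *     Fast[1u].KeepCs = (boolean)FALSE;                                 // de-assert CS after the last transfer
 *     (void)Spi_Ip_AsyncTransmitFast(Fast, 2u, &App_SpiNotification);
 */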
1870
1871 /**
1872 * @brief This function will configure Scatter/Gather TCDs for the channels TX DMA, RX DMA and CMD DMA
1873 * according to Dma Fast transfers configuration. DMA channels will be started at the end of the function.
1874 */
1875 static void Spi_Ip_DmaFastConfig(uint8 Instance, const Spi_Ip_FastTransferType *FastTransferCfg, uint8 NumberOfTransfer)
1876 {
1877 Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
1878 boolean ClearCS = (boolean)FALSE;
1879 uint8 DisHwRequest = 0u;
1880 uint8 Count = 0u;
1881 uint8 CmdTCDSGIndex = 0u;
1882 Dma_Ip_LogicChannelTransferListType DmaTcdList[1u];
1883
1884 DmaTcdList[0u].Param = DMA_IP_CH_SET_CONTROL_EN_MAJOR_INTERRUPT;
1885 DmaTcdList[0u].Value = 1u;
1886
1887 for(Count = 0u; Count < NumberOfTransfer; Count++)
1888 {
1889 /* Update State structure. */
1890 State->RxIndex = 0u;
1891 State->TxIndex = 0u;
1892 State->TxBuffer = FastTransferCfg[Count].TxBuffer;
1893 State->RxBuffer = FastTransferCfg[Count].RxBuffer;
1894 if (State->ExternalDevice->DeviceParams->FrameSize < 9u)
1895 {
1896 State->ExpectedFifoWrites = FastTransferCfg[Count].Length;
1897 }
1898 else
1899 {
1900 State->ExpectedFifoWrites = FastTransferCfg[Count].Length/2u;
1901 }
1902 State->ExpectedFifoReads = State->ExpectedFifoWrites;
1903 if (State->ExternalDevice->DeviceParams->FrameSize >16u)
1904 {
1905 State->ExpectedFifoReads = State->ExpectedFifoWrites/2u;
1906 }
1907 State->ExpectedCmdFifoWrites = State->ExpectedFifoReads;
1908 State->PhyUnitConfig->CmdDmaFast[Count].DefaultData = FastTransferCfg[Count].DefaultData;
1909 State->PhyUnitConfig->CmdDmaFast[Count].DmaFastPushrCmd = FastTransferCfg[Count].ExternalDevice->PushrCmd;
1910 State->PhyUnitConfig->CmdDmaFast[Count].DmaFastPushrCmdLast = FastTransferCfg[Count].ExternalDevice->PushrCmd & (~((uint16)((SPI_PUSHR_CONT_MASK | SPI_PUSHR_PP_MCSC_MASK) >> 16u)));
1911
1912 /* CS is de-asserted after the last transfer; for the other transfers this depends on KeepCs.
1913 The DMA HW request is disabled at the end of the last transfer. */
1914 if(Count == (NumberOfTransfer - 1u))
1915 {
1916 ClearCS = (boolean)TRUE;
1917 DisHwRequest = 1u;
1918 }
1919 else
1920 {
1921 if((boolean)FALSE == FastTransferCfg[Count].KeepCs)
1922 {
1923 ClearCS = (boolean)TRUE;
1924 }
1925 else
1926 {
1927 ClearCS = (boolean)FALSE;
1928 }
1929 DisHwRequest = 0u;
1930 }
1931
1932 /* Configure software TCDs Scatter Gather for CMD DMA channel */
1933 if((boolean)TRUE == ClearCS)
1934 {
1935 if(State->ExpectedCmdFifoWrites > 1u)
1936 {
1937 Spi_Ip_CmdDmaTcdSGConfig( Instance,
1938 State->PhyUnitConfig->TxCmdDmaSGId[CmdTCDSGIndex],
1939 (Dma_Ip_uintPtrType)&State->PhyUnitConfig->CmdDmaFast[Count].DmaFastPushrCmd,
1940 State->ExpectedCmdFifoWrites - 1u,
1941 0u /* do not disable the DMA HW request; the transfer continues after the next software TCD ScatterGather is loaded */
1942 );
1943 CmdTCDSGIndex++;
1944 }
1945 Spi_Ip_CmdDmaTcdSGConfig( Instance,
1946 State->PhyUnitConfig->TxCmdDmaSGId[CmdTCDSGIndex],
1947 (Dma_Ip_uintPtrType)&State->PhyUnitConfig->CmdDmaFast[Count].DmaFastPushrCmdLast,
1948 1u, /* transfer last command to clear CS */
1949 DisHwRequest
1950 );
1951 CmdTCDSGIndex++;
1952 }
1953 else
1954 {
1955 Spi_Ip_CmdDmaTcdSGConfig( Instance,
1956 State->PhyUnitConfig->TxCmdDmaSGId[CmdTCDSGIndex],
1957 (Dma_Ip_uintPtrType)&State->PhyUnitConfig->CmdDmaFast[Count].DmaFastPushrCmd,
1958 State->ExpectedCmdFifoWrites,
1959 DisHwRequest
1960 );
1961 CmdTCDSGIndex++;
1962 }
1963 State->ExpectedCmdFifoWrites = 0u;
1964
1965 /* Configure software TCDs Scatter Gather for TX DMA channel */
1966 Spi_Ip_TxDmaTcdSGConfig(Instance, Count, DisHwRequest);
1967
1968 /* Configure software TCDs Scatter Gather for RX DMA channel */
1969 Spi_Ip_RxDmaTcdSGConfig(Instance, Count, DisHwRequest);
1970 }
1971
1972 /* When all transfer sessions are completed, the next TCD ScatterGather is loaded into the hardware. If that ScatterGather has INTMAJOR=0,
1973 the Dma_Ip interrupt function will not call the Spi DMA notification because the interrupt is treated as spurious (Done flag = 1, INTMAJOR = 0).
1974 The workaround is to set INTMAJOR=1 for the next TCD ScatterGather. */
1975 if (NumberOfTransfer < State->PhyUnitConfig->NumberRxSG)
1976 {
1977 /* Set INTMAJOR=1 for next RX TCD ScatterGather */
1978 (void)Dma_Ip_SetLogicChannelScatterGatherList(State->PhyUnitConfig->RxDmaChannel, State->PhyUnitConfig->RxDmaFastSGId[NumberOfTransfer], DmaTcdList, 1u);
1979 }
1980
1981 /* Load first software TCD to hardware TCD for CMD DMA channel */
1982 (void)Dma_Ip_SetLogicChannelScatterGatherConfig(State->PhyUnitConfig->TxCmdDmaChannel, State->PhyUnitConfig->TxCmdDmaSGId[0u]);
1983 /* Load first software TCD to hardware TCD for TX DMA channel */
1984 (void)Dma_Ip_SetLogicChannelScatterGatherConfig(State->PhyUnitConfig->TxDmaChannel, State->PhyUnitConfig->TxDmaFastSGId[0u]);
1985 /* Load first software TCD to hardware TCD for RX DMA channel */
1986 (void)Dma_Ip_SetLogicChannelScatterGatherConfig(State->PhyUnitConfig->RxDmaChannel, State->PhyUnitConfig->RxDmaFastSGId[0u]);
1987
1988 /* Enable HW request for RX DMA channel before TX DMA channel */
1989 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->RxDmaChannel, DMA_IP_CH_SET_HARDWARE_REQUEST);
1990 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->TxCmdDmaChannel, DMA_IP_CH_SET_HARDWARE_REQUEST);
1991 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->TxDmaChannel, DMA_IP_CH_SET_HARDWARE_REQUEST);
1992 }
1993 #endif
1994
1995 void Spi_Ip_ManageBuffers(uint8 Instance)
1996 {
1997 const Spi_Ip_StateStructureType* State;
1998 #if (SPI_IP_DMA_USED == STD_ON)
1999 Dma_Ip_LogicChannelStatusType DmaChannelStatus;
2000 #endif
2001
2002 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2003 DevAssert(Instance < SPI_INSTANCE_COUNT);
2004 #endif
2005 State = Spi_Ip_apxStateStructureArray[Instance];
2006 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2007 DevAssert(State != NULL_PTR);
2008 #endif
2009 if(SPI_IP_POLLING == State->TransferMode)
2010 {
2011 #if (SPI_IP_DMA_USED == STD_ON)
2012 if((boolean)FALSE == State->PhyUnitConfig->DmaUsed)
2013 #endif
2014 {
2015 Spi_Ip_TransferProcess(Instance);
2016 }
2017 #if (SPI_IP_DMA_USED == STD_ON)
2018 else
2019 {
2020 DmaChannelStatus.Done = FALSE;
2021 (void)Dma_Ip_GetLogicChannelStatus(State->PhyUnitConfig->RxDmaChannel, &DmaChannelStatus);
2022 if((boolean)TRUE == DmaChannelStatus.Done)
2023 {
2024 /* Clear DONE bit */
2025 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->RxDmaChannel, DMA_IP_CH_CLEAR_DONE);
2026 Spi_Ip_IrqDmaHandler(Instance);
2027 }
2028 }
2029 #endif
2030 }
2031 }
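/*
 * Usage sketch (illustrative only): servicing a polling-mode transfer started with Spi_Ip_AsyncTransmit.
 * Spi_Ip_ManageBuffers is called cyclically (here in a simple loop) until the unit leaves the busy state;
 * the instance number 0u is a hypothetical placeholder.
 *
 *     do
 *     {
 *         Spi_Ip_ManageBuffers(0u);
 *     } while (SPI_IP_BUSY == Spi_Ip_GetStatus(0u));
 */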
2032 /*================================================================================================*/
2033
2034 Spi_Ip_StatusType Spi_Ip_UpdateTransferParam
2035 (
2036 const Spi_Ip_ExternalDeviceType *ExternalDevice,
2037 const Spi_Ip_TransferAdjustmentType *Param
2038 )
2039 {
2040 Spi_Ip_StateStructureType* State;
2041 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
2042
2043 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2044 DevAssert(NULL_PTR != ExternalDevice);
2045 DevAssert(NULL_PTR != Param);
2046 DevAssert(NULL_PTR != Spi_Ip_apxStateStructureArray[ExternalDevice->Instance]);
2047 #endif
2048
2049 State = Spi_Ip_apxStateStructureArray[ExternalDevice->Instance];
2050 SchM_Enter_Spi_SPI_EXCLUSIVE_AREA_08();
2051 if (State->Status != SPI_IP_BUSY)
2052 {
2053 State->KeepCs = Param->KeepCs;
2054 if (Param->DeviceParams != NULL_PTR)
2055 {
2056 ExternalDevice->DeviceParams->DefaultData = Param->DeviceParams->DefaultData;
2057 ExternalDevice->DeviceParams->FrameSize = Param->DeviceParams->FrameSize;
2058 ExternalDevice->DeviceParams->Lsb = Param->DeviceParams->Lsb;
2059 }
2060 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_08();
2061 }
2062 else
2063 {
2064 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_08();
2065 Status = SPI_IP_STATUS_FAIL;
2066 }
2067
2068 return Status;
2069 }
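/*
 * Usage sketch (illustrative only): keeping the chip select asserted between two consecutive transfers.
 * Only KeepCs is changed; DeviceParams is left NULL_PTR so the device parameters stay untouched, matching
 * the NULL check above. The variable names are hypothetical.
 *
 *     Spi_Ip_TransferAdjustmentType Adjust;
 *     Adjust.KeepCs = (boolean)TRUE;
 *     Adjust.DeviceParams = NULL_PTR;
 *     (void)Spi_Ip_UpdateTransferParam(&Spi_Ip_ExternalDevice_0, &Adjust);
 */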
2070
2071 Spi_Ip_StatusType Spi_Ip_UpdateFrameSize(const Spi_Ip_ExternalDeviceType *ExternalDevice, uint8 FrameSize)
2072 {
2073 const Spi_Ip_StateStructureType* State;
2074 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
2075
2076 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2077 DevAssert(ExternalDevice != NULL_PTR);
2078 DevAssert(SPI_IP_FRAMESIZE_MAX_U8 >= FrameSize);
2079 DevAssert(SPI_IP_FRAMESIZE_MIN_U8 <= FrameSize);
2080 #endif
2081 State = Spi_Ip_apxStateStructureArray[ExternalDevice->Instance];
2082 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2083 DevAssert(State != NULL_PTR);
2084 #endif
2085 /* Frame size can be changed when no transfers are in progress. */
2086 if (State->Status != SPI_IP_BUSY)
2087 {
2088 ExternalDevice->DeviceParams->FrameSize = FrameSize;
2089 }
2090 else
2091 {
2092 Status = SPI_IP_STATUS_FAIL;
2093 }
2094 return Status;
2095 }
2096
2097 Spi_Ip_StatusType Spi_Ip_UpdateLsb(const Spi_Ip_ExternalDeviceType *ExternalDevice, boolean Lsb)
2098 {
2099 const Spi_Ip_StateStructureType* State;
2100 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
2101
2102 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2103 DevAssert(ExternalDevice != NULL_PTR);
2104 #endif
2105 State = Spi_Ip_apxStateStructureArray[ExternalDevice->Instance];
2106 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2107 DevAssert(State != NULL_PTR);
2108 #endif
2109 /* Bit order can be changed when no transfers are in progress. */
2110 if (State->Status != SPI_IP_BUSY)
2111 {
2112 ExternalDevice->DeviceParams->Lsb = Lsb;
2113 }
2114 else
2115 {
2116 Status = SPI_IP_STATUS_FAIL;
2117 }
2118 return Status;
2119 }
2120
2121 Spi_Ip_StatusType Spi_Ip_UpdateDefaultTransmitData(const Spi_Ip_ExternalDeviceType *ExternalDevice, uint32 DefaultData)
2122 {
2123 const Spi_Ip_StateStructureType* State;
2124 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
2125
2126 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2127 DevAssert(ExternalDevice != NULL_PTR);
2128 #endif
2129 State = Spi_Ip_apxStateStructureArray[ExternalDevice->Instance];
2130 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2131 DevAssert(State != NULL_PTR);
2132 #endif
2133 /* Default transmit data can be changed when no transfers are in progress. */
2134 if (State->Status != SPI_IP_BUSY)
2135 {
2136 ExternalDevice->DeviceParams->DefaultData = DefaultData;
2137 }
2138 else
2139 {
2140 Status = SPI_IP_STATUS_FAIL;
2141 }
2142 return Status;
2143 }
2144
2145 Spi_Ip_StatusType Spi_Ip_UpdateTransferMode(uint8 Instance, Spi_Ip_ModeType Mode)
2146 {
2147 Spi_Ip_StateStructureType* State;
2148 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
2149
2150 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2151 DevAssert(Instance < SPI_INSTANCE_COUNT);
2152 #endif
2153 State = Spi_Ip_apxStateStructureArray[Instance];
2154 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2155 DevAssert(State != NULL_PTR);
2156 #endif
2157 /* Transfer mode can be changed when no transfers are in progress. */
2158 if (State->Status != SPI_IP_BUSY)
2159 {
2160 State->TransferMode = Mode;
2161 }
2162 else
2163 {
2164 Status = SPI_IP_STATUS_FAIL;
2165 }
2166 return Status;
2167 }
2168
2169 void Spi_Ip_Cancel(uint8 Instance)
2170 {
2171 SPI_Type* Base;
2172 Spi_Ip_StateStructureType* State;
2173
2174 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2175 DevAssert(Instance < SPI_INSTANCE_COUNT);
2176 #endif
2177 Base = Spi_Ip_apxBases[Instance];
2178 State = Spi_Ip_apxStateStructureArray[Instance];
2179 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2180 DevAssert(State != NULL_PTR);
2181 #endif
2182 SchM_Enter_Spi_SPI_EXCLUSIVE_AREA_10();
2183 if(SPI_IP_BUSY == State->Status)
2184 {
2185 /* Halt before update RSER */
2186 Base->MCR |= SPI_MCR_HALT_MASK;
2187 /* Disable interrupts and DMA requests. */
2188 Base->RSER = 0U;
2189 #if (SPI_IP_DMA_USED == STD_ON)
2190 if((boolean)TRUE == State->PhyUnitConfig->DmaUsed)
2191 {
2192 /* Disable all HW request */
2193 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->RxDmaChannel, DMA_IP_CH_CLEAR_HARDWARE_REQUEST);
2194 #if (SPI_IP_SLAVE_SUPPORT == STD_ON)
2195 if((boolean)FALSE == State->PhyUnitConfig->SlaveMode)
2196 #endif
2197 {
2198 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->TxCmdDmaChannel, DMA_IP_CH_CLEAR_HARDWARE_REQUEST);
2199 }
2200 (void)Dma_Ip_SetLogicChannelCommand(State->PhyUnitConfig->TxDmaChannel, DMA_IP_CH_CLEAR_HARDWARE_REQUEST);
2201 }
2202 #endif
2203 /* Clear FIFO */
2204 Base->MCR |= (SPI_MCR_CLR_RXF_MASK | SPI_MCR_CLR_TXF_MASK);
2205 /* set State to idle */
2206 State->Status = SPI_IP_IDLE;
2207 }
2208 SchM_Exit_Spi_SPI_EXCLUSIVE_AREA_10();
2209 }
2210
2211 /**
2212 * @brief This function is called by SPI ISRs.
2213 * @details This function will process activities for flags TCF, RFOF and TFUF.
2214 *
2215 * @param[in] Instance Instance of the hardware unit.
2216 *
2217 * @implements Spi_Ip_IrqHandler_Activity
2218 */
2219 void Spi_Ip_IrqHandler(uint8 Instance)
2220 {
2221 SPI_Type* Base = Spi_Ip_apxBases[Instance];
2222 const Spi_Ip_StateStructureType* State = Spi_Ip_apxStateStructureArray[Instance];
2223 uint32 IrqFlags = 0u;
2224
2225 if(NULL_PTR != State)
2226 {
2227 /* the driver has been initialized */
2228 IrqFlags = Base->SR & (SPI_SR_TCF_MASK | SPI_SR_RFOF_MASK | SPI_SR_TFUF_MASK);
2229 IrqFlags &= Base->RSER & (SPI_RSER_TCF_RE_MASK | SPI_RSER_RFOF_RE_MASK | SPI_RSER_TFUF_RE_MASK);
2230 if(0u != IrqFlags)
2231 {
2232 Spi_Ip_TransferProcess(Instance);
2233 }
2234 else
2235 {
2236 /* Do nothing - Return immediately */
2237 }
2238 }
2239 else
2240 {
2241 /* the driver has not been initialized */
2242 /* clear all flags */
2243 Base->SR &= 0xFFFF0000u;
2244 }
2245 }
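/*
 * Integration note (illustrative only): in interrupt mode the transfer-complete/error interrupt of each SPI
 * hardware unit must be routed to Spi_Ip_IrqHandler with the matching instance number. A minimal hand-written
 * wrapper could look as follows; the handler name and instance number are hypothetical, and in a generated
 * project this mapping is normally provided by the interrupt configuration.
 *
 *     void Spi_0_IrqWrapper(void)
 *     {
 *         Spi_Ip_IrqHandler(0u);
 *     }
 */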
2246
2247 /**
2248 * @brief This function returns the status of the SPI driver.
2249 * @details This function returns the status of the SPI driver.
2250 *
2251 * @return Spi_Ip_HwStatusType
2252 *
2253 * @param[in] Instance Instance of the hardware unit.
2254 */
2255 Spi_Ip_HwStatusType Spi_Ip_GetStatus(uint8 Instance)
2256 {
2257 const Spi_Ip_StateStructureType* State;
2258 Spi_Ip_HwStatusType Status = SPI_IP_UNINIT;
2259
2260 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2261 DevAssert(Instance < SPI_INSTANCE_COUNT);
2262 #endif
2263 State = Spi_Ip_apxStateStructureArray[Instance];
2264 if (State != NULL_PTR)
2265 {
2266 Status = State->Status;
2267 }
2268 return Status;
2269 }
2270
2271 #if (SPI_IP_DUAL_CLOCK_MODE == STD_ON)
2272 Spi_Ip_StatusType Spi_Ip_SetClockMode(uint8 Instance, Spi_Ip_DualClockModeType ClockMode)
2273 {
2274 Spi_Ip_StateStructureType* State;
2275 Spi_Ip_StatusType Status = SPI_IP_STATUS_SUCCESS;
2276
2277 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2278 DevAssert(Instance < SPI_INSTANCE_COUNT);
2279 #endif
2280 State = Spi_Ip_apxStateStructureArray[Instance];
2281 #if (SPI_IP_DEV_ERROR_DETECT == STD_ON)
2282 DevAssert(State != NULL_PTR);
2283 #endif
2284 /* Clock mode can be changed when no transfers are in progress. */
2285 if (State->Status != SPI_IP_BUSY)
2286 {
2287 State->ClockMode = ClockMode;
2288 }
2289 else
2290 {
2291 Status = SPI_IP_STATUS_FAIL;
2292 }
2293 return Status;
2294 }
2295 #endif
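/*
 * Usage sketch (illustrative only, requires SPI_IP_DUAL_CLOCK_MODE == STD_ON): selecting the alternate clock
 * setting while the unit is idle. SPI_IP_ALTERNATE_CLOCK is assumed here as the counterpart of the
 * SPI_IP_NORMAL_CLOCK value used in Spi_Ip_Init; the instance number is hypothetical.
 *
 *     if (SPI_IP_BUSY != Spi_Ip_GetStatus(0u))
 *     {
 *         (void)Spi_Ip_SetClockMode(0u, SPI_IP_ALTERNATE_CLOCK);
 *     }
 */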
2296
2297 #define SPI_STOP_SEC_CODE
2298 #include "Spi_MemMap.h"
2299
2300 #ifdef __cplusplus
2301 }
2302 #endif
2303
2304 /** @} */
2305