1 /*
2 * Copyright 2019-2023 NXP
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include "fsl_enet_qos.h"
8 /*******************************************************************************
9 * Definitions
10 ******************************************************************************/
11
12 /* Component ID definition, used by tools. */
13 #ifndef FSL_COMPONENT_ID
14 #define FSL_COMPONENT_ID "platform.drivers.enet_qos"
15 #endif
16
17 /*! @brief Defines the number of nanoseconds in one second (10^9). */
18 #define ENET_QOS_NANOSECS_ONESECOND (1000000000U)
19 /*! @brief Defines the number of microseconds in one second (10^6). */
20 #define ENET_QOS_MICRSECS_ONESECOND (1000000U)
21
22 /*! @brief Rx buffer LSB ignore bits. */
23 #define ENET_QOS_RXBUFF_IGNORELSB_BITS (3U)
24 /*! @brief ENET FIFO size unit. */
25 #define ENET_QOS_FIFOSIZE_UNIT (256U)
26 /*! @brief ENET half-duplex default IPG. */
27 #define ENET_QOS_HALFDUPLEX_DEFAULTIPG (4U)
28 /*! @brief ENET minimum ring length. */
29 #define ENET_QOS_MIN_RINGLEN (4U)
30 /*! @brief ENET wakeup filter numbers. */
31 #define ENET_QOS_WAKEUPFILTER_NUM (8U)
32 /*! @brief Required system time timer frequency. */
33 #define ENET_QOS_SYSTIME_REQUIRED_CLK_MHZ (50U)
34 /*! @brief Ethernet VLAN tag length. */
35 #define ENET_QOS_FRAME_VLAN_TAGLEN 4U
36
37 /*! @brief AVB TYPE */
38 #define ENET_QOS_AVBTYPE 0x22F0U
39 #define ENET_QOS_HEAD_TYPE_OFFSET (12)
40 #define ENET_QOS_HEAD_AVBTYPE_OFFSET (16)
41
42 /*! @brief Defines the macro for converting constants from host byte order to network byte order. */
43 #define ENET_QOS_HTONS(n) __REV16(n)
44 #define ENET_QOS_HTONL(n) __REV(n)
45 #define ENET_QOS_NTOHS(n) __REV16(n)
46 #define ENET_QOS_NTOHL(n) __REV(n)
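/* For reference (assuming the usual little-endian Cortex-M core): the macros above reduce to
 * plain byte swaps, e.g. ENET_QOS_HTONS(0x22F0U) yields 0xF022U and
 * ENET_QOS_HTONL(0x11223344UL) yields 0x44332211UL. */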
47
48 #define ENET_QOS_DMA_CHX_RX_CTRL_RBSZ
49 /*******************************************************************************
50 * Prototypes
51 ******************************************************************************/
52
53 /*! @brief Mask the cache management code if cache control is disabled. */
54 #if !defined(FSL_ETH_ENABLE_CACHE_CONTROL)
55 #define ENET_QOS_DcacheInvalidateByRange(address, sizeByte)
56 #else
57 #define ENET_QOS_DcacheInvalidateByRange(address, sizeByte) DCACHE_InvalidateByRange(address, sizeByte)
58 #endif
59
60 /*!
61 * @brief Increase the index in the ring.
62 *
63 * @param index The current index.
64 * @param max The size.
65 * @return the increased index.
66 */
67 static uint16_t ENET_QOS_IncreaseIndex(uint16_t index, uint16_t max);
68
69 /*!
70 * @brief Poll status flag.
71 *
72 * @param regAddr The register address to read the status from.
73 * @param mask The mask applied to the register value.
74 * @param readyStatus The expected value of the masked field.
75 * @retval kStatus_Success Poll readyStatus Success.
76 * @retval kStatus_ENET_QOS_Timeout Poll readyStatus timeout.
77 */
78 static status_t ENET_QOS_PollStatusFlag(volatile uint32_t *regAddr, uint32_t mask, uint32_t readyStatus);
79
80 /*!
81 * @brief Set ENET DMA controller with the configuration.
82 *
83 * @param base ENET peripheral base address.
84 * @param config ENET Mac configuration.
85 */
86 static void ENET_QOS_SetDMAControl(ENET_QOS_Type *base, const enet_qos_config_t *config);
87
88 /*!
89 * @brief Set ENET MAC controller with the configuration.
90 *
91 * @param base ENET peripheral base address.
92 * @param config ENET Mac configuration.
93 * @param macAddr ENET six-byte mac address.
94 */
95 static void ENET_QOS_SetMacControl(ENET_QOS_Type *base,
96 const enet_qos_config_t *config,
97 uint8_t *macAddr,
98 uint8_t macCount);
99 /*!
100 * @brief Set ENET MTL with the configuration.
101 *
102 * @param base ENET peripheral base address.
103 * @param config ENET Mac configuration.
104 */
105 static void ENET_QOS_SetMTL(ENET_QOS_Type *base, const enet_qos_config_t *config);
106
107 /*!
108 * @brief Set ENET DMA transmit buffer descriptors for one channel.
109 *
110 * @param base ENET peripheral base address.
111 * @param bufferConfig ENET buffer configuration.
112 * @param intTxEnable tx interrupt enable.
113 * @param channel The channel number, 0 or 1.
114 */
115 static status_t ENET_QOS_TxDescriptorsInit(ENET_QOS_Type *base,
116 const enet_qos_buffer_config_t *bufferConfig,
117 bool intTxEnable,
118 uint8_t channel);
119
120 /*!
121 * @brief Set ENET DMA receive buffer descriptors for one channel.
122 *
123 * @param base ENET peripheral base address.
124 * @param bufferConfig ENET buffer configuration.
125 * @param intRxEnable rx interrupt enable.
126 * @param channel The channel number, 0 or 1.
127 */
128 static status_t ENET_QOS_RxDescriptorsInit(ENET_QOS_Type *base,
129 enet_qos_config_t *config,
130 const enet_qos_buffer_config_t *bufferConfig,
131 bool intRxEnable,
132 uint8_t channel);
133
134 /*!
135 * @brief Sets the ENET 1588 feature.
136 *
137 * Enable the enhanced 1588 buffer descriptor mode and start
138 * the 1588 timer.
139 *
140 * @param base ENET peripheral base address.
141 * @param config The ENET configuration.
142 * @param refClk_Hz The reference clock for ptp 1588.
143 */
144 static status_t ENET_QOS_SetPtp1588(ENET_QOS_Type *base, const enet_qos_config_t *config, uint32_t refClk_Hz);
145
146 /*!
147 * @brief Store the receive time-stamp for event PTP frame in the time-stamp buffer ring.
148 *
149 * @param base ENET peripheral base address.
150 * @param handle ENET handler.
151 * @param rxDesc The ENET receive descriptor pointer.
153 * @param ts The timestamp structure pointer.
154 */
155 static void ENET_QOS_StoreRxFrameTime(ENET_QOS_Type *base,
156 enet_qos_handle_t *handle,
157 enet_qos_rx_bd_struct_t *rxDesc,
159 enet_qos_ptp_time_t *ts);
160
161 /*!
162 * @brief Check if txDirtyRing available.
163 *
164 * @param txDirtyRing pointer to txDirtyRing
165 * @return true if the txDirtyRing has room for a new entry, false if it is full.
166 */
167 static inline bool ENET_QOS_TxDirtyRingAvailable(enet_qos_tx_dirty_ring_t *txDirtyRing);
168
169 /*******************************************************************************
170 * Variables
171 ******************************************************************************/
172 /*! @brief Pointers to enet bases for each instance. */
173 static ENET_QOS_Type *const s_enetqosBases[] = ENET_QOS_BASE_PTRS;
174
175 /*! @brief Pointers to enet IRQ number for each instance. */
176 static const IRQn_Type s_enetqosIrqId[] = ENET_QOS_IRQS;
177
178 /* ENET ISR for transactional APIs. */
179 static enet_qos_isr_t s_enetqosIsr;
180
181 /*! @brief Pointers to enet handles for each instance. */
182 static enet_qos_handle_t *s_ENETHandle[ARRAY_SIZE(s_enetqosBases)] = {NULL};
183
184 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
185 /*! @brief Pointers to enet clocks for each instance. */
186 const clock_ip_name_t s_enetqosClock[ARRAY_SIZE(s_enetqosBases)] = ENETQOS_CLOCKS;
187 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
188
189 /*******************************************************************************
190 * Code
191 ******************************************************************************/
192
193 static status_t ENET_QOS_PollStatusFlag(volatile uint32_t *regAddr, uint32_t mask, uint32_t readyStatus)
194 {
195 uint8_t retryTimes = 10U;
196 status_t result = kStatus_Success;
197
198 while ((readyStatus != (*regAddr & mask)) && (0U != retryTimes))
199 {
200 retryTimes--;
201 SDK_DelayAtLeastUs(1U, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
202 }
203
204 if (retryTimes == 0U)
205 {
206 result = kStatus_ENET_QOS_Timeout;
207 }
208
209 return result;
210 }
211
212 /*!
213 * brief Sets the ENET AVB feature.
214 *
215 * ENET_QOS AVB feature configuration, set transmit bandwidth.
216 * This API is called when the AVB feature is required.
217 *
218 * param base ENET_QOS peripheral base address.
219 * param config The ENET_QOS AVB feature configuration structure.
220 * param queueIndex ENET_QOS queue index.
221 */
222 void ENET_QOS_AVBConfigure(ENET_QOS_Type *base, const enet_qos_cbs_config_t *config, uint8_t queueIndex)
223 {
224 assert(config != NULL);
225
226 /* Enable AV algorithm */
227 base->MTL_QUEUE[queueIndex].MTL_TXQX_ETS_CTRL |= ENET_QOS_MTL_TXQX_ETS_CTRL_AVALG_MASK;
228 /* Configure send slope */
229 base->MTL_QUEUE[queueIndex].MTL_TXQX_SNDSLP_CRDT = config->sendSlope;
230 /* Configure idle slope (same register as tx weight) */
231 base->MTL_QUEUE[queueIndex].MTL_TXQX_QNTM_WGHT = config->idleSlope;
232 /* Configure high credit */
233 base->MTL_QUEUE[queueIndex].MTL_TXQX_HI_CRDT = config->highCredit;
234 /* Configure low credit */
235 base->MTL_QUEUE[queueIndex].MTL_TXQX_LO_CRDT = config->lowCredit;
236 }
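/* Usage sketch for the AVB configuration above. The slope/credit values below are placeholders
 * only; real values must be derived from the reserved stream bandwidth and port rate per
 * IEEE 802.1Qav, and the queue index must refer to an AVB-enabled Tx queue.
 *
 * code
 * enet_qos_cbs_config_t cbsConfig = {
 *     .sendSlope  = 0x1C00U,     // placeholder credit decrease rate
 *     .idleSlope  = 0x0400U,     // placeholder credit increase rate
 *     .highCredit = 0x1000U,     // placeholder upper credit limit
 *     .lowCredit  = 0xFFFF1000U, // placeholder lower credit limit (negative, two's complement)
 * };
 * ENET_QOS_AVBConfigure(ENET, &cbsConfig, 1U);
 * endcode
 */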
237
238 static uint16_t ENET_QOS_IncreaseIndex(uint16_t index, uint16_t max)
239 {
240 /* Increase the index. */
241 index++;
242 if (index >= max)
243 {
244 index = 0;
245 }
246 return index;
247 }
248
249 static uint32_t ENET_QOS_ReverseBits(uint32_t value)
250 {
251 value = ((value & 0x55555555UL) << 1U) | ((value >> 1U) & 0x55555555UL);
252 value = ((value & 0x33333333UL) << 2U) | ((value >> 2U) & 0x33333333UL);
253 value = ((value & 0x0F0F0F0FUL) << 4U) | ((value >> 4U) & 0x0F0F0F0FUL);
254
255 return (value >> 24U) | ((value >> 8U) & 0xFF00UL) | ((value & 0xFF00UL) << 8U) | (value << 24U);
256 }
257
258 static void ENET_QOS_SetDMAControl(ENET_QOS_Type *base, const enet_qos_config_t *config)
259 {
260 assert(config != NULL);
261
262 uint8_t index;
263 uint32_t reg;
264 uint32_t burstLen;
265
266 if (kENET_QOS_RmiiMode == config->miiMode)
267 {
268 /* Disable enet qos clock first. */
269 ENET_QOS_EnableClock(false);
270 }
271 else
272 {
273 /* Enable enet qos clock. */
274 ENET_QOS_EnableClock(true);
275 }
276 /* Set MII mode*/
277 ENET_QOS_SetSYSControl(config->miiMode);
278
279 /* Reset first. The reset bit will automatically be cleared after the reset completes. */
280 base->DMA_MODE |= ENET_QOS_DMA_MODE_SWR_MASK;
281 for (uint32_t i = 0U; i < 100UL; i++)
282 {
283 __NOP();
284 }
285 if (kENET_QOS_RmiiMode == config->miiMode)
286 {
287 /* Configure mac */
288 reg = ENET_QOS_MAC_CONFIGURATION_DM(config->miiDuplex) | (uint32_t)config->miiSpeed |
289 ENET_QOS_MAC_CONFIGURATION_S2KP(
290 ((config->specialControl & (uint32_t)kENET_QOS_8023AS2KPacket) != 0U) ? 1U : 0U);
291 if (config->miiDuplex == kENET_QOS_MiiHalfDuplex)
292 {
293 reg |= ENET_QOS_MAC_CONFIGURATION_IPG(ENET_QOS_HALFDUPLEX_DEFAULTIPG);
294 }
295 base->MAC_CONFIGURATION = reg;
296 /* Enable enet qos clock. */
297 ENET_QOS_EnableClock(true);
298 for (uint32_t i = 0U; i < 100UL; i++)
299 {
300 __NOP();
301 }
302 }
303 /* Wait for the reset to complete. */
304 while ((base->DMA_MODE & ENET_QOS_DMA_MODE_SWR_MASK) != 0U)
305 {
306 }
307
308 /* Set the burst length. */
309 for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
310 {
311 burstLen = (uint32_t)kENET_QOS_BurstLen1;
312 if (config->multiqueueCfg != NULL)
313 {
314 burstLen = (uint32_t)config->multiqueueCfg->burstLen;
315 }
316 base->DMA_CH[index].DMA_CHX_CTRL = burstLen & ENET_QOS_DMA_CHX_CTRL_PBLx8_MASK;
317
318 reg = base->DMA_CH[index].DMA_CHX_TX_CTRL & ~ENET_QOS_DMA_CHX_TX_CTRL_TxPBL_MASK;
319 base->DMA_CH[index].DMA_CHX_TX_CTRL = reg | ENET_QOS_DMA_CHX_TX_CTRL_TxPBL(burstLen & 0x3FU);
320
321 reg = base->DMA_CH[index].DMA_CHX_RX_CTRL & ~ENET_QOS_DMA_CHX_RX_CTRL_RxPBL_MASK;
322 base->DMA_CH[index].DMA_CHX_RX_CTRL = reg | ENET_QOS_DMA_CHX_RX_CTRL_RxPBL(burstLen & 0x3FU);
323 }
324 }
325
326 static void ENET_QOS_SetMTL(ENET_QOS_Type *base, const enet_qos_config_t *config)
327 {
328 assert(config != NULL);
329
330 uint32_t txqOpreg = 0;
331 uint32_t rxqOpReg = 0;
332 enet_qos_multiqueue_config_t *multiqCfg = config->multiqueueCfg;
333 uint8_t index;
334
335 /* Set transmit operation mode. */
336 if ((config->specialControl & (uint32_t)kENET_QOS_StoreAndForward) != 0U)
337 {
338 txqOpreg = ENET_QOS_MTL_TXQX_OP_MODE_TSF_MASK;
339 rxqOpReg = ENET_QOS_MTL_RXQX_OP_MODE_RSF_MASK;
340 }
341 /* Set transmit operation mode. */
342 txqOpreg |= ENET_QOS_MTL_TXQX_OP_MODE_FTQ_MASK;
343 /* Set receive operation mode. */
344 rxqOpReg |= ENET_QOS_MTL_RXQX_OP_MODE_FUP_MASK | ENET_QOS_MTL_RXQX_OP_MODE_RFD(3U) |
345 ENET_QOS_MTL_RXQX_OP_MODE_RFA(1U) | ENET_QOS_MTL_RXQX_OP_MODE_EHFC_MASK;
346
347 if (multiqCfg == NULL)
348 {
349 txqOpreg |=
350 ENET_QOS_MTL_TXQX_OP_MODE_TQS(((uint32_t)ENET_QOS_MTL_TXFIFOSIZE / (uint32_t)ENET_QOS_FIFOSIZE_UNIT - 1U));
351 rxqOpReg |=
352 ENET_QOS_MTL_RXQX_OP_MODE_RQS(((uint32_t)ENET_QOS_MTL_RXFIFOSIZE / (uint32_t)ENET_QOS_FIFOSIZE_UNIT - 1U));
353 base->MTL_QUEUE[0].MTL_TXQX_OP_MODE = txqOpreg | ENET_QOS_MTL_TXQX_OP_MODE_TXQEN((uint32_t)kENET_QOS_DCB_Mode);
354 base->MTL_QUEUE[0].MTL_RXQX_OP_MODE = rxqOpReg;
355 }
356 else
357 {
358 /* Set the schedule/arbitration(set for multiple queues). */
359 base->MTL_OPERATION_MODE = ENET_QOS_MTL_OPERATION_MODE_SCHALG(multiqCfg->mtltxSche) |
360 ENET_QOS_MTL_OPERATION_MODE_RAA(multiqCfg->mtlrxSche);
361
362 for (index = 0; index < multiqCfg->txQueueUse; index++)
363 {
364 txqOpreg |= ENET_QOS_MTL_TXQX_OP_MODE_TQS(
365 ((uint32_t)ENET_QOS_MTL_TXFIFOSIZE / ((uint32_t)multiqCfg->txQueueUse * ENET_QOS_FIFOSIZE_UNIT)) - 1U);
366 base->MTL_QUEUE[index].MTL_TXQX_OP_MODE =
367 txqOpreg | ENET_QOS_MTL_TXQX_OP_MODE_TXQEN((uint32_t)multiqCfg->txQueueConfig[index].mode);
368 if (multiqCfg->txQueueConfig[index].mode == kENET_QOS_AVB_Mode)
369 {
370 ENET_QOS_AVBConfigure(base, multiqCfg->txQueueConfig[index].cbsConfig, index);
371 }
372 else
373 {
374 base->MTL_QUEUE[index].MTL_TXQX_QNTM_WGHT = multiqCfg->txQueueConfig[index].weight;
375 }
376 }
377
378 volatile uint32_t *mtlrxQuemapReg;
379 uint8_t configIndex;
380 for (index = 0; index < multiqCfg->rxQueueUse; index++)
381 {
382 rxqOpReg |= ENET_QOS_MTL_RXQX_OP_MODE_RQS(
383 ((uint32_t)ENET_QOS_MTL_RXFIFOSIZE / ((uint32_t)multiqCfg->rxQueueUse * ENET_QOS_FIFOSIZE_UNIT)) - 1U);
384 base->MTL_QUEUE[index].MTL_RXQX_OP_MODE = rxqOpReg;
385 mtlrxQuemapReg = (index < 4U) ? &base->MTL_RXQ_DMA_MAP0 : &base->MTL_RXQ_DMA_MAP1;
386 configIndex = (index & 0x3U);
387 *mtlrxQuemapReg &= ~((uint32_t)ENET_QOS_MTL_RXQ_DMA_MAP0_Q0MDMACH_MASK << (8U * configIndex));
388 *mtlrxQuemapReg |= (uint32_t)ENET_QOS_MTL_RXQ_DMA_MAP0_Q0MDMACH(multiqCfg->rxQueueConfig[index].mapChannel)
389 << (8U * configIndex);
390 }
391 }
392 }
393
394 static void ENET_QOS_SetMacControl(ENET_QOS_Type *base,
395 const enet_qos_config_t *config,
396 uint8_t *macAddr,
397 uint8_t macCount)
398 {
399 assert(config != NULL);
400
401 uint32_t reg = 0;
402
403 /* Set Macaddr */
404 /* Rx packets whose DA matches the MAC address content
405 * are routed to DMA channel 0. */
406 if (macAddr != NULL)
407 {
408 for (uint8_t i = 0; i < macCount; i++)
409 {
410 ENET_QOS_SetMacAddr(base, macAddr, i);
411 }
412 }
413
414 /* Set the receive filter. */
415 reg =
416 ENET_QOS_MAC_PACKET_FILTER_PR(((config->specialControl & (uint32_t)kENET_QOS_PromiscuousEnable) != 0U) ? 1U :
417 0U) |
418 ENET_QOS_MAC_PACKET_FILTER_DBF(((config->specialControl & (uint32_t)kENET_QOS_BroadCastRxDisable) != 0U) ? 1U :
419 0U) |
420 ENET_QOS_MAC_PACKET_FILTER_PM(((config->specialControl & (uint32_t)kENET_QOS_MulticastAllEnable) != 0U) ? 1U :
421 0U) |
422 ENET_QOS_MAC_PACKET_FILTER_HMC(((config->specialControl & (uint32_t)kENET_QOS_HashMulticastEnable) != 0U) ? 1U :
423 0U);
424 base->MAC_PACKET_FILTER = reg;
425 /* Flow control. */
426 if ((config->specialControl & (uint32_t)kENET_QOS_FlowControlEnable) != 0U)
427 {
428 base->MAC_RX_FLOW_CTRL = ENET_QOS_MAC_RX_FLOW_CTRL_RFE_MASK | ENET_QOS_MAC_RX_FLOW_CTRL_UP_MASK;
429 base->MAC_TX_FLOW_CTRL_Q[0] = ENET_QOS_MAC_TX_FLOW_CTRL_Q_PT(config->pauseDuration);
430 }
431
432 /* Set the 1us tick counter. */
433 reg = config->csrClock_Hz / ENET_QOS_MICRSECS_ONESECOND - 1U;
434 base->MAC_ONEUS_TIC_COUNTER = ENET_QOS_MAC_ONEUS_TIC_COUNTER_TIC_1US_CNTR(reg);
435
436 /* Set the speed and duplex. */
437 reg = ENET_QOS_MAC_CONFIGURATION_DM(config->miiDuplex) | (uint32_t)config->miiSpeed |
438 ENET_QOS_MAC_CONFIGURATION_S2KP(((config->specialControl & (uint32_t)kENET_QOS_8023AS2KPacket) != 0U) ? 1U :
439 0U) |
440 ENET_QOS_MAC_CONFIGURATION_IPC(
441 ((config->specialControl & (uint32_t)kENET_QOS_RxChecksumOffloadEnable) != 0U) ? 1U : 0U);
442 if (config->miiDuplex == kENET_QOS_MiiHalfDuplex)
443 {
444 reg |= ENET_QOS_MAC_CONFIGURATION_IPG(ENET_QOS_HALFDUPLEX_DEFAULTIPG);
445 }
446 base->MAC_CONFIGURATION = reg;
447
448 if (config->multiqueueCfg != NULL)
449 {
450 reg = 0U;
451 uint8_t configIndex;
452 enet_qos_multiqueue_config_t *multiqCfg = config->multiqueueCfg;
453 uint32_t txQueuePrioMap0 = base->MAC_TXQ_PRTY_MAP0;
454 uint32_t txQueuePrioMap1 = base->MAC_TXQ_PRTY_MAP1;
455 uint32_t rxQueuePrioMap0 = base->MAC_RXQ_CTRL[2];
456 uint32_t rxQueuePrioMap1 = base->MAC_RXQ_CTRL[3];
457 uint32_t rxCtrlReg1 = base->MAC_RXQ_CTRL[1];
458
459 for (uint8_t index = 0U; index < multiqCfg->txQueueUse; index++)
460 {
461 configIndex = index & 0x3U;
462
463 /* Configure tx queue priority. */
464 if (index < 4U)
465 {
466 txQueuePrioMap0 &= ~((uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << (8U * configIndex));
467 txQueuePrioMap0 |= (uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0(multiqCfg->txQueueConfig[index].priority)
468 << (8U * configIndex);
469 }
470 else
471 {
472 txQueuePrioMap1 &= ~((uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << (8U * configIndex));
473 txQueuePrioMap1 |= (uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0(multiqCfg->txQueueConfig[index].priority)
474 << (8U * configIndex);
475 }
476 }
477
478 for (uint8_t index = 0U; index < multiqCfg->rxQueueUse; index++)
479 {
480 configIndex = index & 0x3U;
481
482 /* Configure rx queue priority. */
483 if (index < 4U)
484 {
485 rxQueuePrioMap0 &= ~((uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0_MASK << (8U * configIndex));
486 rxQueuePrioMap0 |= (uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0(multiqCfg->rxQueueConfig[index].priority)
487 << (8U * configIndex);
488 }
489 else
490 {
491 rxQueuePrioMap1 &= ~((uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0_MASK << (8U * configIndex));
492 rxQueuePrioMap1 |= (uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0(multiqCfg->rxQueueConfig[index].priority)
493 << (8U * configIndex);
494 }
495
496 /* Configure queue enable mode. */
497 reg |= ENET_QOS_MAC_RXQ_CTRL_RXQ0EN((uint32_t)multiqCfg->rxQueueConfig[index].mode) << (2U * index);
498
499 /* Configure rx queue routing */
500 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketAVCPQ) != 0U)
501 {
502 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_AVCPQ_MASK;
503 rxCtrlReg1 |= (ENET_QOS_MAC_RXQ_CTRL_AVCPQ(index) | ENET_QOS_MAC_RXQ_CTRL_TACPQE_MASK);
504 }
505
506 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketPTPQ) != 0U)
507 {
508 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_PTPQ_MASK;
509 rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_PTPQ(index);
510 }
511
512 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketDCBCPQ) != 0U)
513 {
514 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_DCBCPQ_MASK;
515 rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_DCBCPQ(index);
516 }
517
518 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketUPQ) != 0U)
519 {
520 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_UPQ_MASK;
521 rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_UPQ(index);
522 }
523
524 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketMCBCQ) != 0U)
525 {
526 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_MCBCQ_MASK;
527 rxCtrlReg1 |= (ENET_QOS_MAC_RXQ_CTRL_MCBCQ(index) | ENET_QOS_MAC_RXQ_CTRL_MCBCQEN_MASK);
528 }
529 }
530
531 base->MAC_TXQ_PRTY_MAP0 = txQueuePrioMap0;
532 base->MAC_TXQ_PRTY_MAP1 = txQueuePrioMap1;
533 base->MAC_RXQ_CTRL[2] = rxQueuePrioMap0;
534 base->MAC_RXQ_CTRL[3] = rxQueuePrioMap1;
535 base->MAC_RXQ_CTRL[1] = rxCtrlReg1;
536 }
537 else
538 {
539 /* Configure queue enable mode. */
540 reg = ENET_QOS_MAC_RXQ_CTRL_RXQ0EN((uint32_t)kENET_QOS_DCB_Mode);
541 }
542
543 /* Enable queue. */
544 base->MAC_RXQ_CTRL[0] = reg;
545
546 /* Mask MMC counters interrupts as we don't handle
547 * them in the interrupt handler.
548 */
549 base->MAC_MMC_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
550 base->MAC_MMC_TX_INTERRUPT_MASK = 0xFFFFFFFFU;
551 base->MAC_MMC_IPC_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
552 base->MAC_MMC_FPE_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
553 base->MAC_MMC_FPE_TX_INTERRUPT_MASK = 0xFFFFFFFFU;
554 }
555
556 static status_t ENET_QOS_TxDescriptorsInit(ENET_QOS_Type *base,
557 const enet_qos_buffer_config_t *bufferConfig,
558 bool intTxEnable,
559 uint8_t channel)
560 {
561 uint16_t j;
562 enet_qos_tx_bd_struct_t *txbdPtr;
563 uint32_t control = intTxEnable ? ENET_QOS_TXDESCRIP_RD_IOC_MASK : 0U;
564 const enet_qos_buffer_config_t *buffCfg = bufferConfig;
565 uint32_t txDescAddr, txDescTail;
566
567 if (buffCfg == NULL)
568 {
569 return kStatus_InvalidArgument;
570 }
571
572 /* Check the ring length. */
573 if (buffCfg->txRingLen < ENET_QOS_MIN_RINGLEN)
574 {
575 return kStatus_InvalidArgument;
576 }
577 /* Set the tx descriptor start/tail pointer, shall be word aligned. */
578 txDescAddr = (uint32_t)(uintptr_t)buffCfg->txDescStartAddrAlign & ENET_QOS_DMA_CHX_TXDESC_LIST_ADDR_TDESLA_MASK;
579 txDescTail = (uint32_t)(uintptr_t)buffCfg->txDescTailAddrAlign & ENET_QOS_DMA_CHX_TXDESC_TAIL_PTR_TDTP_MASK;
580 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
581 txDescAddr = MEMORY_ConvertMemoryMapAddress(txDescAddr, kMEMORY_Local2DMA);
582 txDescTail = MEMORY_ConvertMemoryMapAddress(txDescTail, kMEMORY_Local2DMA);
583 #endif
584 base->DMA_CH[channel].DMA_CHX_TXDESC_LIST_ADDR = txDescAddr;
585 base->DMA_CH[channel].DMA_CHX_TXDESC_TAIL_PTR = txDescTail;
586 /* Set the tx ring length. */
587 base->DMA_CH[channel].DMA_CHX_TXDESC_RING_LENGTH =
588 ((uint32_t)buffCfg->txRingLen - 1U) & ENET_QOS_DMA_CHX_TXDESC_RING_LENGTH_TDRL_MASK;
589
590 /* Init the txbdPtr to the transmit descriptor start address. */
591 txbdPtr = (enet_qos_tx_bd_struct_t *)(buffCfg->txDescStartAddrAlign);
592 for (j = 0; j < buffCfg->txRingLen; j++)
593 {
594 txbdPtr->buff1Addr = 0;
595 txbdPtr->buff2Addr = 0;
596 txbdPtr->buffLen = control;
597 txbdPtr->controlStat = 0;
598 txbdPtr++;
599 }
600
601 return kStatus_Success;
602 }
603
604 static status_t ENET_QOS_RxDescriptorsInit(ENET_QOS_Type *base,
605 enet_qos_config_t *config,
606 const enet_qos_buffer_config_t *bufferConfig,
607 bool intRxEnable,
608 uint8_t channel)
609 {
610 uint16_t j;
611 uint32_t reg;
612 enet_qos_rx_bd_struct_t *rxbdPtr;
613 uint16_t index;
614 bool doubleBuffEnable = ((config->specialControl & (uint32_t)kENET_QOS_DescDoubleBuffer) != 0U) ? true : false;
615 const enet_qos_buffer_config_t *buffCfg = bufferConfig;
616 uint32_t control = ENET_QOS_RXDESCRIP_RD_BUFF1VALID_MASK;
617 uint32_t rxDescAddr, rxDescTail;
618
619 if (buffCfg == NULL)
620 {
621 return kStatus_InvalidArgument;
622 }
623
624 if (intRxEnable)
625 {
626 control |= ENET_QOS_RXDESCRIP_RD_IOC_MASK;
627 }
628
629 if (doubleBuffEnable)
630 {
631 control |= ENET_QOS_RXDESCRIP_RD_BUFF2VALID_MASK;
632 }
633
634 /* Do not give ownership to the DMA before the Rx buffer is ready. */
635 if ((config->rxBuffAlloc == NULL) || (config->rxBuffFree == NULL))
636 {
637 control |= ENET_QOS_RXDESCRIP_WR_OWN_MASK;
638 }
639
640 /* Check the ring length. */
641 if (buffCfg->rxRingLen < ENET_QOS_MIN_RINGLEN)
642 {
643 return kStatus_InvalidArgument;
644 }
645
646 /* Set the rx descriptor start/tail pointer, shall be word aligned. */
647 rxDescAddr = (uint32_t)(uintptr_t)buffCfg->rxDescStartAddrAlign & ENET_QOS_DMA_CHX_RXDESC_LIST_ADDR_RDESLA_MASK;
648 rxDescTail = (uint32_t)(uintptr_t)buffCfg->rxDescTailAddrAlign & ENET_QOS_DMA_CHX_RXDESC_TAIL_PTR_RDTP_MASK;
649 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
650 rxDescAddr = MEMORY_ConvertMemoryMapAddress(rxDescAddr, kMEMORY_Local2DMA);
651 rxDescTail = MEMORY_ConvertMemoryMapAddress(rxDescTail, kMEMORY_Local2DMA);
652 #endif
653 base->DMA_CH[channel].DMA_CHX_RXDESC_LIST_ADDR = rxDescAddr;
654 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = rxDescTail;
655 /* Register DMA_CHX_RXDESC_RING_LENGTH renamed to DMA_CHX_RX_CONTROL2 */
656 #if defined(ENET_QOS_DMA_CHX_RX_CONTROL2_COUNT) && ENET_QOS_DMA_CHX_RX_CONTROL2_COUNT
657 base->DMA_CH[channel].DMA_CHX_RX_CONTROL2 =
658 ((uint32_t)buffCfg->rxRingLen - 1U) & ENET_QOS_DMA_CHX_RX_CONTROL2_RDRL_MASK;
659 #else
660 base->DMA_CH[channel].DMA_CHX_RXDESC_RING_LENGTH =
661 ((uint32_t)buffCfg->rxRingLen - 1U) & ENET_QOS_DMA_CHX_RXDESC_RING_LENGTH_RDRL_MASK;
662 #endif
663 reg = base->DMA_CH[channel].DMA_CHX_RX_CTRL & ~ENET_QOS_DMA_CHX_RX_CTRL_RBSZ_13_y_MASK;
664 reg |= ENET_QOS_DMA_CHX_RX_CTRL_RBSZ_13_y(buffCfg->rxBuffSizeAlign >> ENET_QOS_RXBUFF_IGNORELSB_BITS);
665 base->DMA_CH[channel].DMA_CHX_RX_CTRL = reg;
666
667 /* Init the rxbdPtr to the receive descriptor start address. */
668 rxbdPtr = (enet_qos_rx_bd_struct_t *)(buffCfg->rxDescStartAddrAlign);
669 for (j = 0U; j < buffCfg->rxRingLen; j++)
670 {
671 if ((config->rxBuffAlloc == NULL) || (config->rxBuffFree == NULL))
672 {
673 if (doubleBuffEnable)
674 {
675 index = 2U * j;
676 }
677 else
678 {
679 index = j;
680 }
681
682 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
683 buffCfg->rxBufferStartAddr[index] =
684 MEMORY_ConvertMemoryMapAddress((uintptr_t)buffCfg->rxBufferStartAddr[index], kMEMORY_Local2DMA);
685 #endif
686 rxbdPtr->buff1Addr = buffCfg->rxBufferStartAddr[index];
687
688 /* The second buffer is set with 0 because it is not required for normal case. */
689 if (doubleBuffEnable)
690 {
691 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
692 buffCfg->rxBufferStartAddr[index + 1U] = MEMORY_ConvertMemoryMapAddress(
693 (uintptr_t)buffCfg->rxBufferStartAddr[index + 1U], kMEMORY_Local2DMA);
694 #endif
695 rxbdPtr->buff2Addr = buffCfg->rxBufferStartAddr[index + 1U];
696 }
697 else
698 {
699 rxbdPtr->buff2Addr = 0;
700 }
701 }
702
703 /* Set the valid and DMA own flag.*/
704 rxbdPtr->control = control;
705 rxbdPtr++;
706 }
707
708 return kStatus_Success;
709 }
710
711 static status_t ENET_QOS_SetPtp1588(ENET_QOS_Type *base, const enet_qos_config_t *config, uint32_t refClk_Hz)
712 {
713 assert(config != NULL);
714 assert(config->ptpConfig != NULL);
715 assert(refClk_Hz != 0U);
716
717 uint32_t control = 0U;
718 status_t result = kStatus_Success;
719 enet_qos_ptp_config_t *ptpConfig = config->ptpConfig;
720 uint32_t ptpClk_Hz = refClk_Hz;
721 uint32_t ssInc, snsSinc;
722
723 /* Clear the timestamp interrupt first. */
724 base->MAC_INTERRUPT_ENABLE &= ~ENET_QOS_MAC_INTERRUPT_ENABLE_TSIE_MASK;
725
726 if (ptpConfig->fineUpdateEnable)
727 {
728 control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCFUPDT_MASK;
729 ptpClk_Hz = ptpConfig->systemTimeClock_Hz; /* PTP clock 50MHz. */
730 }
731
732 /* Enable the IEEE 1588 timestamping and snapshot for event message. */
733 control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPV4ENA_MASK |
734 ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPV6ENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSENALL_MASK |
735 ENET_QOS_MAC_TIMESTAMP_CONTROL_TSEVNTENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_SNAPTYPSEL_MASK |
736 ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR(ptpConfig->tsRollover);
737
738 if (ptpConfig->ptp1588V2Enable)
739 {
740 control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSVER2ENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPENA_MASK;
741 }
742
743 /* Initialize the sub-second increment register. */
744 if (ptpConfig->tsRollover == kENET_QOS_DigitalRollover)
745 {
746 ssInc = (uint32_t)(((uint64_t)ENET_QOS_NANOSECS_ONESECOND << 8U) / ptpClk_Hz);
747 }
748 else
749 {
750 ssInc = (uint32_t)((((uint64_t)ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_TSSS_MASK + 1U) << 8U) / ptpClk_Hz);
751 }
752
753 snsSinc = ssInc & 0xFFU;
754 ssInc = (ssInc >> 8U) & 0xFFU;
755
756 base->MAC_TIMESTAMP_CONTROL = control;
757
758 /* Initialize the system timer. */
759 base->MAC_SYSTEM_TIME_NANOSECONDS_UPDATE = 0;
760
761 /* Set the second.*/
762 base->MAC_SYSTEM_TIME_SECONDS_UPDATE = 0;
763 base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS = 0;
764
765 /* Initialize the system timer. */
766 base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSINIT_MASK;
767
768 while ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSINIT_MASK) != 0U)
769 {
770 }
771
772 base->MAC_SUB_SECOND_INCREMENT =
773 ENET_QOS_MAC_SUB_SECOND_INCREMENT_SSINC(ssInc) | ENET_QOS_MAC_SUB_SECOND_INCREMENT_SNSINC(snsSinc);
774
775 /* Set the initial added value for the fine update. */
776 if (ptpConfig->fineUpdateEnable)
777 {
778 result = ENET_QOS_Ptp1588CorrectTimerInFine(base, ptpConfig->defaultAddend);
779 }
780
781 return result;
782 }
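/* Worked example for the sub-second increment computed above, assuming the required 50 MHz
 * system time clock and digital rollover:
 *   ssInc   = (10^9 << 8) / 50000000 = 5120
 *   snsSinc = 5120 & 0xFF        = 0   (no sub-nanosecond fraction)
 *   ssInc   = (5120 >> 8) & 0xFF = 20  (the timer advances 20 ns per PTP clock cycle)
 */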
783
784 static inline bool ENET_QOS_TxDirtyRingAvailable(enet_qos_tx_dirty_ring_t *txDirtyRing)
785 {
786 return !txDirtyRing->isFull;
787 }
788
789 static void ENET_QOS_StoreRxFrameTime(ENET_QOS_Type *base,
790 enet_qos_handle_t *handle,
791 enet_qos_rx_bd_struct_t *rxDesc,
792 enet_qos_ptp_time_t *ts)
793 {
794 assert(ts != NULL);
795
796 uint32_t nanosecond;
797
798 /* Get transmit time stamp second. */
799 nanosecond = rxDesc->buff1Addr;
800 if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) == 0U)
801 {
802 /* Binary rollover, 0.465ns accuracy. */
803 nanosecond = (uint32_t)(((uint64_t)nanosecond * 465U) / 1000U);
804 }
805 ts->second = rxDesc->reserved;
806 ts->nanosecond = nanosecond;
807 }
808
809 uint32_t ENET_QOS_GetInstance(ENET_QOS_Type *base)
810 {
811 uint32_t instance;
812
813 /* Find the instance index from base address mappings. */
814 for (instance = 0; instance < ARRAY_SIZE(s_enetqosBases); instance++)
815 {
816 if (s_enetqosBases[instance] == base)
817 {
818 break;
819 }
820 }
821
822 assert(instance < ARRAY_SIZE(s_enetqosBases));
823
824 return instance;
825 }
826
827 /*!
828 * brief Gets the ENET default configuration structure.
829 *
830 * The purpose of this API is to get the default ENET configure
831 * structure for ENET_QOS_Init(). User may use the initialized
832 * structure unchanged in ENET_QOS_Init(), or modify some fields of the
833 * structure before calling ENET_QOS_Init().
834 * Example:
835 code
836 enet_qos_config_t config;
837 ENET_QOS_GetDefaultConfig(&config);
838 endcode
839 * param config The ENET mac controller configuration structure pointer.
840 */
841 void ENET_QOS_GetDefaultConfig(enet_qos_config_t *config)
842 {
843 /* Checks input parameter. */
844 assert(config != NULL);
845
846 /* Initializes the configure structure to zero. */
847 (void)memset(config, 0, sizeof(*config));
848
849 /* Sets RGMII mode, full duplex, 1000Mbps for MAC and PHY data interface. */
850 config->miiMode = kENET_QOS_RgmiiMode;
851 config->miiSpeed = kENET_QOS_MiiSpeed1000M;
852 config->miiDuplex = kENET_QOS_MiiFullDuplex;
853
854 /* Sets default configuration for other options. */
855 config->specialControl = 0;
856 config->multiqueueCfg = NULL;
857 config->pauseDuration = 0;
858
859 config->ptpConfig = NULL;
860 }
861
862 /*!
863 * brief Initializes the ENET module.
864 *
865 * This function sets up the ENET with the basic configuration.
866 *
867 * param base ENET peripheral base address.
868 * param config ENET mac configuration structure pointer.
869 * The "enet_qos_config_t" type mac configuration returned from ENET_QOS_GetDefaultConfig
870 * can be used directly. It is also possible to verify the Mac configuration using other methods.
871 * param macAddr ENET mac address of Ethernet device. This MAC address should be
872 * provided.
873 * param refclkSrc_Hz ENET input reference clock.
874 */
875 status_t ENET_QOS_Up(
876 ENET_QOS_Type *base, const enet_qos_config_t *config, uint8_t *macAddr, uint8_t macCount, uint32_t refclkSrc_Hz)
877 {
878 assert(config != NULL);
879 status_t result = kStatus_Success;
880
881 /* Initializes the ENET MDIO. */
882 ENET_QOS_SetSMI(base, refclkSrc_Hz);
883
884 /* Initializes the ENET MTL with basic function. */
885 ENET_QOS_SetMTL(base, config);
886
887 /* Initializes the ENET MAC with basic function. */
888 ENET_QOS_SetMacControl(base, config, macAddr, macCount);
889
890 return result;
891 }
892
893 /*!
894 * brief Initializes the ENET module.
895 *
896 * This function ungates the module clock and initializes it with the ENET basic
897 * configuration.
898 *
899 * param base ENET peripheral base address.
900 * param config ENET mac configuration structure pointer.
901 * The "enet_qos_config_t" type mac configuration returned from ENET_QOS_GetDefaultConfig
902 * can be used directly. It is also possible to verify the Mac configuration using other methods.
903 * param macAddr ENET mac address of Ethernet device. This MAC address should be
904 * provided.
905 * param refclkSrc_Hz ENET input reference clock.
906 */
907 status_t ENET_QOS_Init(
908 ENET_QOS_Type *base, const enet_qos_config_t *config, uint8_t *macAddr, uint8_t macCount, uint32_t refclkSrc_Hz)
909 {
910 assert(config != NULL);
911
912 status_t result = kStatus_Success;
913 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
914 uint32_t instance = ENET_QOS_GetInstance(base);
915
916 /* Ungate ENET clock. */
917 (void)CLOCK_EnableClock(s_enetqosClock[instance]);
918 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
919
920 /* Initializes the ENET DMA with basic function. */
921 ENET_QOS_SetDMAControl(base, config);
922
923 (void)ENET_QOS_Up(base, config, macAddr, macCount, refclkSrc_Hz);
924
925 if (config->ptpConfig != NULL)
926 {
927 result = ENET_QOS_SetPtp1588(base, config, refclkSrc_Hz);
928 }
929
930 return result;
931 }
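/* Minimal bring-up sketch for the functional path. EXAMPLE_CSR_CLK_HZ, EXAMPLE_REF_CLK_HZ and
 * the MAC address are board-specific placeholders, not values defined by this driver.
 *
 * code
 * enet_qos_config_t config;
 * uint8_t macAddr[6] = {0x54U, 0x27U, 0x8DU, 0x00U, 0x00U, 0x00U};
 *
 * ENET_QOS_GetDefaultConfig(&config);
 * config.csrClock_Hz = EXAMPLE_CSR_CLK_HZ;                                 // placeholder CSR/bus clock
 * (void)ENET_QOS_Init(ENET, &config, &macAddr[0], 1U, EXAMPLE_REF_CLK_HZ); // placeholder reference clock
 * endcode
 */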
932
933 /*!
934 * brief Stops the ENET module.
935
936 * This function disables the ENET module.
937 *
938 * param base ENET peripheral base address.
939 */
940 void ENET_QOS_Down(ENET_QOS_Type *base)
941 {
942 enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
943 enet_qos_tx_bd_struct_t *txbdPtr;
944 uint8_t index;
945 uint32_t primask, j;
946 uint32_t txDescAddr;
947
948 /* Disable all interrupts */
949 ENET_QOS_DisableInterrupts(base, 0xFF);
950
951 for (index = 0; index < handle->txQueueUse; index++)
952 {
953 enet_qos_tx_bd_ring_t *txBdRing = &handle->txBdRing[index];
954 enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[index];
955
956 /* Clear pending descriptors */
957 if (handle->callback != NULL)
958 {
959 while (txBdRing->txDescUsed > 0U)
960 {
961 enet_qos_frame_info_t *txDirty = &txDirtyRing->txDirtyBase[txDirtyRing->txConsumIdx];
962
963 txDirty->isTsAvail = false;
964
965 handle->callback(base, handle, kENET_QOS_TxIntEvent, index, handle->userData);
966
967 primask = DisableGlobalIRQ();
968 txBdRing->txDescUsed--;
969 EnableGlobalIRQ(primask);
970 }
971 }
972
973 /* Disable Tx DMA */
974 base->DMA_CH[index].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
975
976 /* Flush Tx Queue */
977 base->MTL_QUEUE[index].MTL_TXQX_OP_MODE |= ENET_QOS_MTL_TXQX_OP_MODE_FTQ_MASK;
978
979 /* Wait until Tx Queue is empty */
980 while ((base->MTL_QUEUE[index].MTL_TXQX_DBG &
981 (ENET_QOS_MTL_TXQX_DBG_TXQSTS_MASK | ENET_QOS_MTL_TXQX_DBG_PTXQ_MASK)) != 0U)
982 {
983 }
984
985 /* Reset hardware ring buffer */
986 txDescAddr =
987 (uint32_t)(uintptr_t)handle->txBdRing[index].txBdBase & ENET_QOS_DMA_CHX_TXDESC_LIST_ADDR_TDESLA_MASK;
988 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
989 txDescAddr = MEMORY_ConvertMemoryMapAddress(txDescAddr, kMEMORY_Local2DMA);
990 #endif
991 base->DMA_CH[index].DMA_CHX_TXDESC_LIST_ADDR = txDescAddr;
992
993 /* Reset software ring buffer */
994 handle->txBdRing[index].txGenIdx = 0;
995 handle->txBdRing[index].txConsumIdx = 0;
996 handle->txBdRing[index].txDescUsed = 0;
997
998 handle->txDirtyRing[index].txGenIdx = 0;
999 handle->txDirtyRing[index].txConsumIdx = 0;
1000 handle->txDirtyRing[index].isFull = false;
1001
1002 txbdPtr = (enet_qos_tx_bd_struct_t *)(handle->txBdRing[index].txBdBase);
1003 for (j = 0; j < handle->txBdRing[index].txRingLen; j++)
1004 {
1005 txbdPtr->buff1Addr = 0;
1006 txbdPtr->buff2Addr = 0;
1007 txbdPtr->buffLen = 0;
1008 txbdPtr->controlStat = 0;
1009 txbdPtr++;
1010 }
1011 }
1012
1013 /* Disable MAC Rx/Tx */
1014 base->MAC_CONFIGURATION &= ~(ENET_QOS_MAC_CONFIGURATION_TE_MASK | ENET_QOS_MAC_CONFIGURATION_RE_MASK);
1015
1016 /* Disable Rx DMA */
1017 for (index = 0; index < handle->rxQueueUse; index++)
1018 {
1019 base->DMA_CH[index].DMA_CHX_RX_CTRL &= ~ENET_QOS_DMA_CHX_RX_CTRL_SR_MASK;
1020 }
1021 }
1022
1023 /*!
1024 * brief Deinitializes the ENET module.
1025
1026 * This function gates the module clock and disables the ENET module.
1027 *
1028 * param base ENET peripheral base address.
1029 */
1030 void ENET_QOS_Deinit(ENET_QOS_Type *base)
1031 {
1032 /* Reset first and wait for the complete
1033 * The reset bit will automatically be cleared after complete. */
1034 base->DMA_MODE |= ENET_QOS_DMA_MODE_SWR_MASK;
1035 while ((base->DMA_MODE & ENET_QOS_DMA_MODE_SWR_MASK) != 0U)
1036 {
1037 }
1038
1039 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
1040 /* Disables the clock source. */
1041 (void)CLOCK_DisableClock(s_enetqosClock[ENET_QOS_GetInstance(base)]);
1042 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
1043 }
1044
1045 /*!
1046 * brief Initialize for all ENET descriptors.
1047 *
1048 * note This function does all tx/rx descriptor initialization. Because this API
1049 * reads the interrupt enable registers first and then sets the interrupt flag for all descriptors
1050 * accordingly, the descriptor initialization should be called
1051 * after ENET_QOS_Init(), ENET_QOS_EnableInterrupts() and ENET_QOS_CreateHandler() (if transactional APIs
1052 * are used).
1053 *
1054 * param base ENET peripheral base address.
1055 * param config The configuration for ENET.
1056 * param bufferConfig All buffers configuration.
1057 */
1058 status_t ENET_QOS_DescriptorInit(ENET_QOS_Type *base, enet_qos_config_t *config, enet_qos_buffer_config_t *bufferConfig)
1059 {
1060 assert(config != NULL);
1061 assert(bufferConfig != NULL);
1062
1063 bool intTxEnable = false;
1064 bool intRxEnable = false;
1065 uint8_t ringNum = 1;
1066 uint8_t txQueueUse = 1;
1067 uint8_t rxQueueUse = 1;
1068 uint8_t channel;
1069
1070 if (config->multiqueueCfg != NULL)
1071 {
1072 ringNum = MAX(config->multiqueueCfg->txQueueUse, config->multiqueueCfg->rxQueueUse);
1073 txQueueUse = config->multiqueueCfg->txQueueUse;
1074 rxQueueUse = config->multiqueueCfg->rxQueueUse;
1075 }
1076
1077 for (channel = 0; channel < ringNum; channel++)
1078 {
1079 intRxEnable = ((base->DMA_CH[channel].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_RIE_MASK) != 0U) ? true : false;
1080 intTxEnable = ((base->DMA_CH[channel].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_TIE_MASK) != 0U) ? true : false;
1081
1082 if (channel < txQueueUse)
1083 {
1084 if ((ENET_QOS_TxDescriptorsInit(base, bufferConfig, intTxEnable, channel) != kStatus_Success))
1085 {
1086 return kStatus_Fail;
1087 }
1088 }
1089
1090 if (channel < rxQueueUse)
1091 {
1092 if ((ENET_QOS_RxDescriptorsInit(base, config, bufferConfig, intRxEnable, channel) != kStatus_Success))
1093 {
1094 return kStatus_Fail;
1095 }
1096 }
1097
1098 bufferConfig++;
1099 }
1100 return kStatus_Success;
1101 }
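/* The ordering note above in practice, for the transactional path. The handle, buffer
 * configuration, callback, MAC address and clock value are application placeholders:
 *
 * code
 * (void)ENET_QOS_Init(ENET, &config, &macAddr[0], 1U, EXAMPLE_REF_CLK_HZ);
 * ENET_QOS_EnableInterrupts(ENET, kENET_QOS_DmaRx | kENET_QOS_DmaTx);
 * ENET_QOS_CreateHandler(ENET, &g_handle, &config, &buffConfig[0], APP_EnetCallback, NULL);
 * (void)ENET_QOS_DescriptorInit(ENET, &config, &buffConfig[0]);
 * endcode
 */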
1102
1103 /*!
1104 * brief Allocates Rx buffers for all BDs.
1105 * It's used for zero copy Rx. In the zero copy Rx case, Rx buffers are dynamic. This function
1106 * populates initial buffers in all BDs for receiving. Then ENET_QOS_GetRxFrame() is used
1107 * to get the Rx frame with zero copy; it allocates a new buffer to replace the buffer in the BD taken
1108 * by the application, and the application should free those buffers after they're used.
1109 *
1110 * note This function should be called after ENET_QOS_CreateHandler() and the buffer allocate/free callback
1111 * functions should be ready.
1112 *
1113 * param base ENET_QOS peripheral base address.
1114 * param handle The ENET_QOS handler structure. This is the same handler pointer used in ENET_QOS_CreateHandler().
1115 */
1116 status_t ENET_QOS_RxBufferAllocAll(ENET_QOS_Type *base, enet_qos_handle_t *handle)
1117 {
1118 status_t result = kStatus_Success;
1119 enet_qos_rx_bd_struct_t *rxbdPtr;
1120 uint32_t buffAddr;
1121 uint8_t channel;
1122 uint16_t index;
1123 uint16_t j;
1124
1125 if ((handle->rxBuffAlloc == NULL) || (handle->rxBuffFree == NULL))
1126 {
1127 return kStatus_ENET_QOS_InitMemoryFail;
1128 }
1129
1130 for (channel = 0; channel < handle->rxQueueUse; channel++)
1131 {
1132 /* Init the rxbdPtr to the receive descriptor start address. */
1133 rxbdPtr = handle->rxBdRing[channel].rxBdBase;
1134 for (j = 0U; j < handle->rxBdRing[channel].rxRingLen; j++)
1135 {
1136 if (handle->doubleBuffEnable)
1137 {
1138 index = 2U * j;
1139 }
1140 else
1141 {
1142 index = j;
1143 }
1144
1145 buffAddr = (uint32_t)(uintptr_t)(uint8_t *)handle->rxBuffAlloc(base, handle->userData, channel);
1146 if (buffAddr == 0U)
1147 {
1148 result = kStatus_ENET_QOS_InitMemoryFail;
1149 break;
1150 }
1151
1152 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1153 buffAddr = (uint32_t)MEMORY_ConvertMemoryMapAddress(buffAddr, kMEMORY_Local2DMA);
1154 #endif
1155 rxbdPtr->buff1Addr = buffAddr;
1156 handle->rxBufferStartAddr[channel][index] = buffAddr;
1157
1158 /* The second buffer is set with 0 because it is not required for normal case. */
1159 if (handle->doubleBuffEnable)
1160 {
1161 buffAddr = (uint32_t)(uintptr_t)(uint8_t *)handle->rxBuffAlloc(base, handle->userData, channel);
1162 if (buffAddr == 0U)
1163 {
1164 result = kStatus_ENET_QOS_InitMemoryFail;
1165 break;
1166 }
1167
1168 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1169 buffAddr = (uint32_t)MEMORY_ConvertMemoryMapAddress(buffAddr, kMEMORY_Local2DMA);
1170 #endif
1171 rxbdPtr->buff2Addr = buffAddr;
1172 handle->rxBufferStartAddr[channel][index + 1U] = buffAddr;
1173 }
1174 else
1175 {
1176 rxbdPtr->buff2Addr = 0;
1177 }
1178
1179 /* Set the valid and DMA own flag.*/
1180 rxbdPtr->control |= ENET_QOS_RXDESCRIP_WR_OWN_MASK;
1181 rxbdPtr++;
1182 }
1183 }
1184
1185 if (result == kStatus_ENET_QOS_InitMemoryFail)
1186 {
1187 ENET_QOS_RxBufferFreeAll(base, handle);
1188 }
1189
1190 return result;
1191 }
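/* Minimal sketch of the Rx buffer allocate/free callbacks the zero-copy path relies on. Only the
 * parameter lists follow from the calls made by this driver; the buffer pool helpers
 * (APP_PoolTake/APP_PoolGive) are application placeholders.
 *
 * code
 * static void *APP_RxBuffAlloc(ENET_QOS_Type *base, void *userData, uint8_t channel)
 * {
 *     return APP_PoolTake(); // must return a properly aligned buffer, or NULL on failure
 * }
 *
 * static void APP_RxBuffFree(ENET_QOS_Type *base, void *buffer, void *userData, uint8_t channel)
 * {
 *     APP_PoolGive(buffer);  // return the buffer to the application pool
 * }
 *
 * config.rxBuffAlloc = APP_RxBuffAlloc;
 * config.rxBuffFree  = APP_RxBuffFree;
 * endcode
 */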
1192
1193 /*!
1194 * brief Frees Rx buffers in all BDs.
1195 * It's used for zero copy Rx. In zero copy Rx case, Rx buffers are dynamic. This function
1196 * will free left buffers in all BDs.
1197 *
1198 * param base ENET_QOS peripheral base address.
1199 * param handle The ENET_QOS handler structure. This is the same handler pointer used in ENET_QOS_CreateHandler().
1200 */
1201 void ENET_QOS_RxBufferFreeAll(ENET_QOS_Type *base, enet_qos_handle_t *handle)
1202 {
1203 uint32_t buffAddr;
1204 uint8_t channel;
1205 uint16_t index;
1206 uint16_t j;
1207
1208 if (handle->rxBuffFree != NULL)
1209 {
1210 for (channel = 0; channel < handle->rxQueueUse; channel++)
1211 {
1212 for (j = 0U; j < handle->rxBdRing[channel].rxRingLen; j++)
1213 {
1214 if (handle->doubleBuffEnable)
1215 {
1216 index = 2U * j;
1217 }
1218 else
1219 {
1220 index = j;
1221 }
1222
1223 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1224 buffAddr = MEMORY_ConvertMemoryMapAddress((uintptr_t)handle->rxBufferStartAddr[channel][index],
1225 kMEMORY_DMA2Local);
1226 #else
1227 buffAddr = (uint32_t)handle->rxBufferStartAddr[channel][index];
1228 #endif
1229 if (buffAddr != 0U)
1230 {
1231 handle->rxBuffFree(base, (void *)(uint8_t *)(uintptr_t)buffAddr, handle->userData, channel);
1232 }
1233
1234 /* The second buffer is set with 0 because it is not required for normal case. */
1235 if (handle->doubleBuffEnable)
1236 {
1237 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1238 buffAddr = MEMORY_ConvertMemoryMapAddress((uintptr_t)handle->rxBufferStartAddr[channel][index + 1U],
1239 kMEMORY_DMA2Local);
1240 #else
1241 buffAddr = (uint32_t)handle->rxBufferStartAddr[channel][index + 1U];
1242 #endif
1243 if (buffAddr != 0U)
1244 {
1245 handle->rxBuffFree(base, (void *)(uint8_t *)(uintptr_t)buffAddr, handle->userData, channel);
1246 }
1247 }
1248 }
1249 }
1250 }
1251 }
1252
1253 /*!
1254 * brief Starts the ENET rx/tx.
1255 * This function enables the tx/rx and starts the rx/tx DMA.
1256 * It shall be called after the ENET initialization and before
1257 * starting to receive data.
1258 *
1259 * param base ENET peripheral base address.
1260 * param rxRingNum The number of the used rx rings. It shall not be
1261 * larger than the ENET_QOS_RING_NUM_MAX(2). If the ringNum is set with
1262 * 1, the ring 0 will be used.
1263 * param txRingNum The number of the used tx rings. It shall not be
1264 * larger than the ENET_QOS_RING_NUM_MAX(2). If the ringNum is set with
1265 * 1, the ring 0 will be used.
1266 *
1267 * note This must be called after all the ENET initialization.
1268 * And should be called when the ENET receive/transmit is required.
1269 */
1270 void ENET_QOS_StartRxTx(ENET_QOS_Type *base, uint8_t txRingNum, uint8_t rxRingNum)
1271 {
1272 assert(txRingNum != 0U);
1273 assert(rxRingNum != 0U);
1274
1275 uint8_t index;
1276
1277 if (txRingNum > ENET_QOS_RING_NUM_MAX)
1278 {
1279 txRingNum = ENET_QOS_RING_NUM_MAX;
1280 }
1281 if (rxRingNum > ENET_QOS_RING_NUM_MAX)
1282 {
1283 rxRingNum = ENET_QOS_RING_NUM_MAX;
1284 }
1285 /* Start/activate the DMA first. */
1286 for (index = 0; index < rxRingNum; index++)
1287 {
1288 base->DMA_CH[index].DMA_CHX_RX_CTRL |= ENET_QOS_DMA_CHX_RX_CTRL_SR_MASK;
1289 }
1290 for (index = 0; index < txRingNum; index++)
1291 {
1292 base->DMA_CH[index].DMA_CHX_TX_CTRL |= ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
1293 }
1294
1295 /* Enable the RX and TX at same time. */
1296 base->MAC_CONFIGURATION |= (ENET_QOS_MAC_CONFIGURATION_TE_MASK | ENET_QOS_MAC_CONFIGURATION_RE_MASK);
1297 }
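/* After the descriptors (and, for zero-copy Rx, the buffers) are ready, start one Tx ring and
 * one Rx ring, a minimal sketch:
 *
 * code
 * ENET_QOS_StartRxTx(ENET, 1, 1);
 * endcode
 */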
1298
1299 /*!
1300 * brief Enables the ENET DMA and MAC interrupts.
1301 *
1302 * This function enables the ENET interrupt according to the provided mask. The mask
1303 * is a logical OR of enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1304 * For example, to enable the dma and mac interrupt, do the following.
1305 * code
1306 * ENET_QOS_EnableInterrupts(ENET, kENET_QOS_DmaRx | kENET_QOS_DmaTx | kENET_QOS_MacPmt);
1307 * endcode
1308 *
1309 * param base ENET peripheral base address.
1310 * param mask ENET interrupts to enable. This is a logical OR of both
1311 * enumeration :: enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1312 */
1313 void ENET_QOS_EnableInterrupts(ENET_QOS_Type *base, uint32_t mask)
1314 {
1315 uint32_t interrupt = mask & 0xFFFFU;
1316 uint8_t index;
1317
1318 /* For dma interrupt. */
1319 if (interrupt != 0U)
1320 {
1321 for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
1322 {
1323 /* Set for all abnormal interrupts. */
1324 if ((ENET_QOS_ABNORM_INT_MASK & interrupt) != 0U)
1325 {
1326 interrupt |= ENET_QOS_DMA_CHX_INT_EN_AIE_MASK;
1327 }
1328 /* Set for all normal interrupts. */
1329 if ((ENET_QOS_NORM_INT_MASK & interrupt) != 0U)
1330 {
1331 interrupt |= ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
1332 }
1333 base->DMA_CH[index].DMA_CHX_INT_EN = interrupt;
1334 }
1335 }
1336 interrupt = mask >> ENET_QOS_MACINT_ENUM_OFFSET;
1337 if (interrupt != 0U)
1338 {
1339 /* MAC interrupt */
1340 base->MAC_INTERRUPT_ENABLE |= interrupt;
1341 }
1342 }
1343
1344 /*!
1345 * brief Clears the ENET mac interrupt events status flag.
1346 *
1347 * This function clears enabled ENET interrupts according to the provided mask. The mask
1348 * is a logical OR of enumeration members. See the ref enet_qos_mac_interrupt_enable_t.
1349 * For example, to clear the PMT interrupt status, do the following.
1350 * code
1351 * ENET_QOS_ClearMacInterruptStatus(ENET, kENET_QOS_MacPmt);
1352 * endcode
1353 *
1354 * param base ENET peripheral base address.
1355 * param mask ENET interrupt source to be cleared.
1356 * This is the logical OR of members of the enumeration :: enet_qos_mac_interrupt_enable_t.
1357 */
1358 void ENET_QOS_ClearMacInterruptStatus(ENET_QOS_Type *base, uint32_t mask)
1359 {
1360 volatile uint32_t dummy;
1361
1362 if ((mask & (uint32_t)kENET_QOS_MacTimestamp) != 0U)
1363 {
1364 dummy = base->MAC_TIMESTAMP_STATUS;
1365 }
1366 else if ((mask & (uint32_t)kENET_QOS_MacPmt) != 0U)
1367 {
1368 dummy = base->MAC_PMT_CONTROL_STATUS;
1369 }
1370 else
1371 {
1372 /* Added to avoid violating MISRA 2004 rule 14.10. */
1373 }
1374 (void)dummy;
1375 }
1376
1377 /*!
1378 * brief Disables the ENET DMA and MAC interrupts.
1379 *
1380 * This function disables the ENET interrupt according to the provided mask. The mask
1381 * is a logical OR of enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1382 * For example, to disable the dma and mac interrupt, do the following.
1383 * code
1384 * ENET_QOS_DisableInterrupts(ENET, kENET_QOS_DmaRx | kENET_QOS_DmaTx | kENET_QOS_MacPmt);
1385 * endcode
1386 *
1387 * param base ENET peripheral base address.
1388 * param mask ENET interrupts to disable. This is a logical OR of both
1389 * enumeration :: enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1390 */
1391 void ENET_QOS_DisableInterrupts(ENET_QOS_Type *base, uint32_t mask)
1392 {
1393 uint32_t interrupt = mask & 0xFFFFU;
1394 uint8_t index;
1395
1396 /* For dma interrupt. */
1397 if (interrupt != 0U)
1398 {
1399 for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
1400 {
1401 /* Set for all abnormal interrupts. */
1402 if ((ENET_QOS_ABNORM_INT_MASK & interrupt) != 0U)
1403 {
1404 interrupt |= ENET_QOS_DMA_CHX_INT_EN_AIE_MASK;
1405 }
1406 /* Set for all normal interrupts. */
1407 if ((ENET_QOS_NORM_INT_MASK & interrupt) != 0U)
1408 {
1409 interrupt |= ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
1410 }
1411 base->DMA_CH[index].DMA_CHX_INT_EN &= ~interrupt;
1412 }
1413 }
1414 interrupt = mask >> ENET_QOS_MACINT_ENUM_OFFSET;
1415 if (interrupt != 0U)
1416 {
1417 /* MAC interrupt */
1418 base->MAC_INTERRUPT_ENABLE &= ~interrupt;
1419 }
1420 }
1421
1422 /*!
1423 * @brief Set the second level IRQ handler, allow user to overwrite the default
1424 * second level weak IRQ handler.
1425 *
1426 * @param ISRHandler The handler to install.
1427 */
1428 void ENET_QOS_SetISRHandler(ENET_QOS_Type *base, enet_qos_isr_t ISRHandler)
1429 {
1430 /* Update IRQ entry. */
1431 s_enetqosIsr = ISRHandler;
1432 /* Enable NVIC. */
1433 (void)EnableIRQ(s_enetqosIrqId[ENET_QOS_GetInstance(base)]);
1434 }
1435
1436 /*!
1437 * brief Create ENET Handler
1438 *
1439 * This is a transactional API; it stores all the data which is needed
1440 * during the whole transactional process. This API should not be used when you use
1441 * functional APIs to do data tx/rx. This function stores many data/flags for
1442 * transactional use, so it is used together with the configuration APIs such as ENET_QOS_Init(), ENET_QOS_DescriptorInit(),
1443 * ENET_QOS_EnableInterrupts() etc. (see the ordering note on ENET_QOS_DescriptorInit()).
1444 *
1445 * note As our transactional transmit APIs use the zero-copy transmit buffer,
1446 * two things are emphasized here:
1447 * 1. tx buffer free/requeue for the application should be done in the tx
1448 * interrupt handler. Please set the callback for kENET_QOS_TxIntEvent with the tx buffer free/requeue
1449 * process APIs.
1450 * 2. the tx interrupt is forced on.
1451 *
1452 * param base ENET peripheral base address.
1453 * param handle ENET handler.
1454 * param config ENET configuration.
1455 * param bufferConfig ENET buffer configuration.
1456 * param callback The callback function.
1457 * param userData The application data.
1458 */
1459 void ENET_QOS_CreateHandler(ENET_QOS_Type *base,
1460 enet_qos_handle_t *handle,
1461 enet_qos_config_t *config,
1462 enet_qos_buffer_config_t *bufferConfig,
1463 enet_qos_callback_t callback,
1464 void *userData)
1465 {
1466 assert(config != NULL);
1467 assert(bufferConfig != NULL);
1468 assert(callback != NULL);
1469
1470 uint8_t ringNum = 1;
1471 uint8_t count = 0;
1472 uint32_t rxIntEnable = 0;
1473 uint8_t txQueueUse = 1;
1474 uint8_t rxQueueUse = 1;
1475 enet_qos_buffer_config_t *buffConfig = bufferConfig;
1476
1477 /* Store transfer parameters in handle pointer. */
1478 (void)memset(handle, 0, sizeof(enet_qos_handle_t));
1479
1480 if (config->multiqueueCfg != NULL)
1481 {
1482 txQueueUse = config->multiqueueCfg->txQueueUse;
1483 rxQueueUse = config->multiqueueCfg->rxQueueUse;
1484 ringNum = MAX(txQueueUse, rxQueueUse);
1485 }
1486
1487 handle->txQueueUse = txQueueUse;
1488 handle->rxQueueUse = rxQueueUse;
1489
1490 if ((config->specialControl & (uint32_t)kENET_QOS_DescDoubleBuffer) != 0U)
1491 {
1492 handle->doubleBuffEnable = true;
1493 }
1494
1495 for (count = 0; count < ringNum; count++)
1496 {
1497 if (count < txQueueUse)
1498 {
1499 handle->txBdRing[count].txBdBase = buffConfig->txDescStartAddrAlign;
1500 handle->txBdRing[count].txRingLen = buffConfig->txRingLen;
1501 handle->txBdRing[count].txGenIdx = 0;
1502 handle->txBdRing[count].txConsumIdx = 0;
1503 handle->txBdRing[count].txDescUsed = 0;
1504
1505 handle->txDirtyRing[count].txDirtyBase = buffConfig->txDirtyStartAddr;
1506 handle->txDirtyRing[count].txRingLen = buffConfig->txRingLen;
1507 handle->txDirtyRing[count].txGenIdx = 0;
1508 handle->txDirtyRing[count].txConsumIdx = 0;
1509
1510 /* Enable tx interrupt for use transactional API to do tx buffer free/requeue. */
1511 base->DMA_CH[count].DMA_CHX_INT_EN |= ENET_QOS_DMA_CHX_INT_EN_TIE_MASK | ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
1512 }
1513
1514 if (count < rxQueueUse)
1515 {
1516 handle->rxBdRing[count].rxBdBase = buffConfig->rxDescStartAddrAlign;
1517 handle->rxBdRing[count].rxGenIdx = 0;
1518 handle->rxBdRing[count].rxRingLen = buffConfig->rxRingLen;
1519 handle->rxBdRing[count].rxBuffSizeAlign = buffConfig->rxBuffSizeAlign;
1520
1521 /* Record rx buffer address for re-init Rx buffer descriptor */
1522 handle->rxBufferStartAddr[count] = buffConfig->rxBufferStartAddr;
1523
1524 /* Record rx buffer need cache maintain */
1525 handle->rxMaintainEnable[count] = buffConfig->rxBuffNeedMaintain;
1526
1527 /* Check if the rx interrupt is enabled. */
1528 rxIntEnable |= (base->DMA_CH[count].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_RIE_MASK);
1529 }
1530
1531 buffConfig++;
1532 }
1533
1534 handle->rxintEnable = (rxIntEnable != 0U) ? true : false;
1535
1536 /* Save the handle pointer in the global variables. */
1537 s_ENETHandle[ENET_QOS_GetInstance(base)] = handle;
1538
1539 /* Set Rx alloc/free callback. */
1540 handle->rxBuffAlloc = config->rxBuffAlloc;
1541 handle->rxBuffFree = config->rxBuffFree;
1542
1543 /* Set callback and userData. */
1544 handle->callback = callback;
1545 handle->userData = userData;
1546
1547 /* Use default ENET_QOS_CommonIRQHandler as default weak IRQ handler. */
1548 ENET_QOS_SetISRHandler(base, ENET_QOS_CommonIRQHandler);
1549 }
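/*
 * Example usage (illustrative sketch, not part of the driver): create the handle with a callback
 * handling kENET_QOS_TxIntEvent. The callback prototype below follows how this driver invokes
 * handle->callback and is assumed to match enet_qos_callback_t in fsl_enet_qos.h; APP_EnetCallback,
 * APP_HandleTxComplete, g_handle, g_config, g_buffConfig and the ENET_QOS base macro are
 * application/device-specific assumptions. APP_HandleTxComplete would typically call
 * ENET_QOS_GetTxFrame() to free or requeue the sent buffer (see the sketch after that function).
 * code
 * static void APP_EnetCallback(ENET_QOS_Type *base, enet_qos_handle_t *handle,
 *                              enet_qos_event_t event, uint8_t channel, void *userData)
 * {
 *     if (event == kENET_QOS_TxIntEvent)
 *     {
 *         APP_HandleTxComplete(handle, channel);
 *     }
 * }
 *
 * ENET_QOS_CreateHandler(ENET_QOS, &g_handle, &g_config, &g_buffConfig, APP_EnetCallback, NULL);
 * endcode
 */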
1550
1551 /*!
1552 * brief Gets the ENET module Mac address.
1553 *
1554 * param base ENET peripheral base address.
1555 * param macAddr The six-byte Mac address pointer.
1556 * The pointer is allocated by application and input into the API.
1557 */
1558 void ENET_QOS_GetMacAddr(ENET_QOS_Type *base, uint8_t *macAddr, uint8_t index)
1559 {
1560 assert(macAddr != NULL);
1561
1562 uint32_t address = base->MAC_ADDRESS[index].LOW;
1563
1564 /* Get from physical address lower register. */
1565 macAddr[2] = (uint8_t)(0xFFU & (address >> 24U));
1566 macAddr[3] = (uint8_t)(0xFFU & (address >> 16U));
1567 macAddr[4] = (uint8_t)(0xFFU & (address >> 8U));
1568 macAddr[5] = (uint8_t)(0xFFU & address);
1569
1570 /* Get from physical address high register. */
1571 address = base->MAC_ADDRESS[index].HIGH;
1572 macAddr[0] = (uint8_t)(0xFFU & (address >> 8U));
1573 macAddr[1] = (uint8_t)(0xFFU & address);
1574 }
1575
1576 /*!
1577 * brief Adds the ENET_QOS device to a multicast group.
1578 *
1579 * param base ENET_QOS peripheral base address.
1580 * param address The six-byte multicast group address which is provided by application.
1581 */
1582 void ENET_QOS_AddMulticastGroup(ENET_QOS_Type *base, uint8_t *address)
1583 {
1584 assert(address != NULL);
1585
1586 enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
1587 uint32_t crc = 0xFFFFFFFFU;
1588 uint32_t count1 = 0;
1589 uint32_t count2 = 0;
1590
1591 /* Calculates the CRC-32 polynomial on the multicast group address. */
1592 for (count1 = 0; count1 < 6U; count1++)
1593 {
1594 uint8_t c = address[count1];
1595 for (count2 = 0; count2 < 0x08U; count2++)
1596 {
1597 if (((c ^ crc) & 1U) != 0U)
1598 {
1599 crc >>= 1U;
1600 c >>= 1U;
1601 crc ^= 0xEDB88320U;
1602 }
1603 else
1604 {
1605 crc >>= 1U;
1606 c >>= 1U;
1607 }
1608 }
1609 }
1610
1611 /* Calculate bitwise reverse value. */
1612 crc = ENET_QOS_ReverseBits(~crc);
1613
1614 /* Get the highest 6 bits. */
1615 crc = crc >> 26U;
1616
1617 handle->multicastCount[crc]++;
1618
1619 if (0U != (crc & 0x20U))
1620 {
1621 base->MAC_HASH_TABLE_REG1 |= (1UL << (crc & 0x1FU));
1622 }
1623 else
1624 {
1625 base->MAC_HASH_TABLE_REG0 |= (1UL << (crc & 0x1FU));
1626 }
1627 }
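/*
 * Example usage (illustrative sketch): join the hash filter group of a multicast MAC address. The
 * address 01:00:5E:00:00:FB below is only an illustration, and the ENET_QOS base macro name depends
 * on the device.
 * code
 * uint8_t mcastAddr[6] = {0x01U, 0x00U, 0x5EU, 0x00U, 0x00U, 0xFBU};
 * ENET_QOS_AddMulticastGroup(ENET_QOS, mcastAddr);
 * endcode
 */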
1628
1629 /*!
1630 * brief Removes the ENET_QOS device from a multicast group.
1631 *
1632 * param base ENET_QOS peripheral base address.
1633 * param address The six-byte multicast group address which is provided by application.
1634 */
1635 void ENET_QOS_LeaveMulticastGroup(ENET_QOS_Type *base, uint8_t *address)
1636 {
1637 assert(address != NULL);
1638
1639 enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
1640 uint32_t crc = 0xFFFFFFFFU;
1641 uint32_t count1 = 0;
1642 uint32_t count2 = 0;
1643
1644 /* Calculates the CRC-32 polynomial on the multicast group address. */
1645 for (count1 = 0; count1 < 6U; count1++)
1646 {
1647 uint8_t c = address[count1];
1648 for (count2 = 0; count2 < 0x08U; count2++)
1649 {
1650 if (((c ^ crc) & 1U) != 0U)
1651 {
1652 crc >>= 1U;
1653 c >>= 1U;
1654 crc ^= 0xEDB88320U;
1655 }
1656 else
1657 {
1658 crc >>= 1U;
1659 c >>= 1U;
1660 }
1661 }
1662 }
1663
1664 /* Calculate bitwise reverse value. */
1665 crc = ENET_QOS_ReverseBits(~crc);
1666
1667 /* Get the highest 6 bits. */
1668 crc = crc >> 26U;
1669
1670 handle->multicastCount[crc]--;
1671
1672 /* Clear the hash table bit only when no other group maps to it (no collision) */
1673 if (0U == handle->multicastCount[crc])
1674 {
1675 if (0U != (crc & 0x20U))
1676 {
1677 base->MAC_HASH_TABLE_REG1 &= ~((1UL << (crc & 0x1FU)));
1678 }
1679 else
1680 {
1681 base->MAC_HASH_TABLE_REG0 &= ~((1UL << (crc & 0x1FU)));
1682 }
1683 }
1684 }
1685
1686 /*!
1687 * brief Sets the ENET SMI(serial management interface)- MII management interface.
1688 *
1689 * param base ENET peripheral base address.
 * param csrClock_Hz The CSR clock frequency in Hz, used to select the MDC clock divider.
1690 */
1691 void ENET_QOS_SetSMI(ENET_QOS_Type *base, uint32_t csrClock_Hz)
1692 {
1693 uint32_t crDiv = 0;
1694 uint32_t srcClock_Hz = csrClock_Hz / 1000000U;
1695
1696 assert((srcClock_Hz >= 20U) && (srcClock_Hz < 800U));
1697
1698 if (srcClock_Hz < 35U)
1699 {
1700 crDiv = 2;
1701 }
1702 else if (srcClock_Hz < 60U)
1703 {
1704 crDiv = 3;
1705 }
1706 else if (srcClock_Hz < 100U)
1707 {
1708 crDiv = 0;
1709 }
1710 else if (srcClock_Hz < 150U)
1711 {
1712 crDiv = 1;
1713 }
1714 else if (srcClock_Hz < 250U)
1715 {
1716 crDiv = 4;
1717 }
1718 else if (srcClock_Hz < 300U)
1719 {
1720 crDiv = 5;
1721 }
1722 else if (srcClock_Hz < 500U)
1723 {
1724 crDiv = 6;
1725 }
1726 else if (srcClock_Hz < 800U)
1727 {
1728 crDiv = 7;
1729 }
1730 else
1731 {
1732 /* Empty else */
1733 }
1734
1735 base->MAC_MDIO_ADDRESS = ENET_QOS_MAC_MDIO_ADDRESS_CR(crDiv);
1736 }
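/*
 * Example usage (illustrative sketch): program the MDC divider from the CSR clock before any MDIO
 * access. EXAMPLE_CSR_CLOCK_HZ stands for an application-provided clock frequency, typically read
 * from the board clock driver; the ENET_QOS base macro name depends on the device.
 * code
 * ENET_QOS_SetSMI(ENET_QOS, EXAMPLE_CSR_CLOCK_HZ);
 * endcode
 */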
1737
1738 /*!
1739 * @brief Sends the MDIO IEEE802.3 Clause 22 format write command.
1740 * After sending the command, the caller needs to check whether the transmission is complete
1741 * with ENET_QOS_IsSMIBusy().
1742 *
1743 * @param base ENET peripheral base address.
1744 * @param phyAddr The PHY address.
1745 * @param regAddr The PHY register address.
1746 * @param data The data written to PHY.
1747 */
1748 void ENET_QOS_StartSMIWrite(ENET_QOS_Type *base, uint8_t phyAddr, uint8_t regAddr, uint16_t data)
1749 {
1750 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1751
1752 /* Build MII write command. */
1753 base->MAC_MDIO_ADDRESS = reg | (uint32_t)kENET_QOS_MiiWriteFrame | ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) |
1754 ENET_QOS_MAC_MDIO_ADDRESS_RDA(regAddr);
1755 base->MAC_MDIO_DATA = data;
1756 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1757 }
1758
1759 /*!
1760 * @brief Sends the MDIO IEEE802.3 Clause 22 format read command.
1761 * After sending the command, the caller needs to check whether the transmission is complete
1762 * with ENET_QOS_IsSMIBusy().
1763 *
1764 * @param base ENET peripheral base address.
1765 * @param phyAddr The PHY address.
1766 * @param regAddr The PHY register address.
1767 */
1768 void ENET_QOS_StartSMIRead(ENET_QOS_Type *base, uint8_t phyAddr, uint8_t regAddr)
1769 {
1770 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1771
1772 /* Build MII read command. */
1773 base->MAC_MDIO_ADDRESS = reg | (uint32_t)kENET_QOS_MiiReadFrame | ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) |
1774 ENET_QOS_MAC_MDIO_ADDRESS_RDA(regAddr);
1775 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1776 }
1777
1778 /*!
1779 * @brief Sends the MDIO IEEE802.3 Clause 45 format write command.
1780 * After sending the command, the caller needs to check whether the transmission is complete
1781 * with ENET_QOS_IsSMIBusy().
1782 *
1783 * @param base ENET peripheral base address.
1784 * @param portAddr The MDIO port address(PHY address).
1785 * @param devAddr The device address.
1786 * @param regAddr The PHY register address.
1787 * @param data The data written to PHY.
1788 */
1789 void ENET_QOS_StartExtC45SMIWrite(
1790 ENET_QOS_Type *base, uint8_t portAddr, uint8_t devAddr, uint16_t regAddr, uint16_t data)
1791 {
1792 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1793
1794 /* Build MII write command. */
1795 base->MAC_MDIO_ADDRESS = reg | ENET_QOS_MAC_MDIO_ADDRESS_C45E_MASK | (uint32_t)kENET_QOS_MiiWriteFrame |
1796 ENET_QOS_MAC_MDIO_ADDRESS_PA(portAddr) | ENET_QOS_MAC_MDIO_ADDRESS_RDA(devAddr);
1797 base->MAC_MDIO_DATA = data | ENET_QOS_MAC_MDIO_DATA_RA(regAddr);
1798 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1799 }
1800
1801 /*!
1802 * @brief Sends the MDIO IEEE802.3 Clause 45 format read command.
1803 * After sending the command, the caller needs to check whether the transmission is complete
1804 * with ENET_QOS_IsSMIBusy().
1805 *
1806 * @param base ENET peripheral base address.
1807 * @param portAddr The MDIO port address(PHY address).
1808 * @param devAddr The device address.
1809 * @param regAddr The PHY register address.
1810 */
1811 void ENET_QOS_StartExtC45SMIRead(ENET_QOS_Type *base, uint8_t portAddr, uint8_t devAddr, uint16_t regAddr)
1812 {
1813 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1814
1815 /* Build MII read command. */
1816 base->MAC_MDIO_ADDRESS = reg | ENET_QOS_MAC_MDIO_ADDRESS_C45E_MASK | (uint32_t)kENET_QOS_MiiReadFrame |
1817 ENET_QOS_MAC_MDIO_ADDRESS_PA(portAddr) | ENET_QOS_MAC_MDIO_ADDRESS_RDA(devAddr);
1818 base->MAC_MDIO_DATA = ENET_QOS_MAC_MDIO_DATA_RA(regAddr);
1819 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1820 }
1821
1822 static status_t ENET_QOS_MDIOWaitTransferOver(ENET_QOS_Type *base)
1823 {
1824 status_t result = kStatus_Success;
1825 #ifdef ENET_QOS_MDIO_TIMEOUT_COUNT
1826 uint32_t counter;
1827 #endif
1828
1829 #ifdef ENET_QOS_MDIO_TIMEOUT_COUNT
1830 for (counter = ENET_QOS_MDIO_TIMEOUT_COUNT; counter > 0U; counter--)
1831 {
1832 if (!ENET_QOS_IsSMIBusy(base))
1833 {
1834 break;
1835 }
1836 }
1837 /* Check for timeout. */
1838 if (0U == counter)
1839 {
1840 result = kStatus_Timeout;
1841 }
1842 #else
1843 while (ENET_QOS_IsSMIBusy(base))
1844 {
1845 }
1846 #endif
1847 return result;
1848 }
1849
1850 /*!
1851 * @brief MDIO write with IEEE802.3 MDIO Clause 22 format.
1852 *
1853 * @param base ENET peripheral base address.
1854 * @param phyAddr The PHY address.
1855 * @param regAddr The PHY register.
1856 * @param data The data written to PHY.
1857 * @return kStatus_Success MDIO access succeeds.
1858 * @return kStatus_Timeout MDIO access timeout.
1859 */
1860 status_t ENET_QOS_MDIOWrite(ENET_QOS_Type *base, uint8_t phyAddr, uint8_t regAddr, uint16_t data)
1861 {
1862 ENET_QOS_StartSMIWrite(base, phyAddr, regAddr, data);
1863
1864 return ENET_QOS_MDIOWaitTransferOver(base);
1865 }
1866
1867 /*!
1868 * @brief MDIO read with IEEE802.3 MDIO Clause 22 format.
1869 *
1870 * @param base ENET peripheral base address.
1871 * @param phyAddr The PHY address.
1872 * @param regAddr The PHY register.
1873 * @param pData The data read from PHY.
1874 * @return kStatus_Success MDIO access succeeds.
1875 * @return kStatus_Timeout MDIO access timeout.
1876 */
1877 status_t ENET_QOS_MDIORead(ENET_QOS_Type *base, uint8_t phyAddr, uint8_t regAddr, uint16_t *pData)
1878 {
1879 assert(pData);
1880
1881 status_t result;
1882
1883 ENET_QOS_StartSMIRead(base, phyAddr, regAddr);
1884
1885 result = ENET_QOS_MDIOWaitTransferOver(base);
1886 if (result != kStatus_Success)
1887 {
1888 return result;
1889 }
1890 *pData = ENET_QOS_ReadSMIData(base);
1891
1892 return result;
1893 }
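/*
 * Example usage (illustrative sketch): blocking Clause 22 PHY register read/write through the MAC
 * MDIO interface. The PHY address 0, register 0 and the modified bit are illustrative values only;
 * real values depend on the external PHY.
 * code
 * uint16_t regValue = 0U;
 * status_t status   = ENET_QOS_MDIORead(ENET_QOS, 0U, 0U, &regValue);
 * if (status == kStatus_Success)
 * {
 *     status = ENET_QOS_MDIOWrite(ENET_QOS, 0U, 0U, (uint16_t)(regValue | 0x8000U));
 * }
 * endcode
 */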
1894
1895 /*!
1896 * @brief MDIO write with IEEE802.3 Clause 45 format.
1897 *
1898 * @param base ENET peripheral base address.
1899 * @param portAddr The MDIO port address(PHY address).
1900 * @param devAddr The device address.
1901 * @param regAddr The PHY register address.
1902 * @param data The data written to PHY.
1903 * @return kStatus_Success MDIO access succeeds.
1904 * @return kStatus_Timeout MDIO access timeout.
1905 */
1906 status_t ENET_QOS_MDIOC45Write(ENET_QOS_Type *base, uint8_t portAddr, uint8_t devAddr, uint16_t regAddr, uint16_t data)
1907 {
1908 ENET_QOS_StartExtC45SMIWrite(base, portAddr, devAddr, regAddr, data);
1909
1910 return ENET_QOS_MDIOWaitTransferOver(base);
1911 }
1912
1913 /*!
1914 * @brief MDIO read with IEEE802.3 Clause 45 format.
1915 *
1916 * @param base ENET peripheral base address.
1917 * @param portAddr The MDIO port address(PHY address).
1918 * @param devAddr The device address.
1919 * @param regAddr The PHY register address.
1920 * @param pData The data read from PHY.
1921 * @return kStatus_Success MDIO access succeeds.
1922 * @return kStatus_Timeout MDIO access timeout.
1923 */
1924 status_t ENET_QOS_MDIOC45Read(ENET_QOS_Type *base, uint8_t portAddr, uint8_t devAddr, uint16_t regAddr, uint16_t *pData)
1925 {
1926 status_t result = kStatus_Success;
1927
1928 ENET_QOS_StartExtC45SMIRead(base, portAddr, devAddr, regAddr);
1929
1930 result = ENET_QOS_MDIOWaitTransferOver(base);
1931 if (result != kStatus_Success)
1932 {
1933 return result;
1934 }
1935 *pData = ENET_QOS_ReadSMIData(base);
1936
1937 return result;
1938 }
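/*
 * Example usage (illustrative sketch): Clause 45 access addresses a device (MMD) inside the PHY in
 * addition to the port address. The port, device and register numbers below are illustrative only.
 * code
 * uint16_t devId = 0U;
 * (void)ENET_QOS_MDIOC45Read(ENET_QOS, 0U, 1U, 0x0002U, &devId);
 * (void)ENET_QOS_MDIOC45Write(ENET_QOS, 0U, 1U, 0x0000U, 0x2040U);
 * endcode
 */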
1939
1940 /*!
1941 * brief Set the MAC to enter the power down mode.
1942 * The remote wake-up frame and magic frame can wake up
1943 * the ENET from the power down mode.
1944 *
1945 * param base ENET peripheral base address.
1946 * param wakeFilter The wakeFilter provided to configure the wake-up frame filter.
1947 * Setting wakeFilter to NULL is allowed if no filter is required. If a filter is required,
1948 * please make sure the wakeFilter pointer points to eight continuous
1949 * 32-bit configuration words.
1950 */
1951 void ENET_QOS_EnterPowerDown(ENET_QOS_Type *base, uint32_t *wakeFilter)
1952 {
1953 uint8_t index;
1954 uint32_t *reg = wakeFilter;
1955
1956 /* Disable the tx dma. */
1957 base->DMA_CH[0].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
1958 base->DMA_CH[1].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
1959
1960 /* Disable the mac tx/rx. */
1961 base->MAC_CONFIGURATION &= ~(ENET_QOS_MAC_CONFIGURATION_RE_MASK | ENET_QOS_MAC_CONFIGURATION_TE_MASK);
1962 /* Enable the remote wakeup packet and enable the power down mode. */
1963 if (wakeFilter != NULL)
1964 {
1965 for (index = 0; index < ENET_QOS_WAKEUPFILTER_NUM; index++)
1966 {
1967 base->MAC_RWK_PACKET_FILTER = *reg;
1968 reg++;
1969 }
1970 }
1971 base->MAC_PMT_CONTROL_STATUS = ENET_QOS_MAC_PMT_CONTROL_STATUS_MGKPKTEN_MASK |
1972 ENET_QOS_MAC_PMT_CONTROL_STATUS_RWKPKTEN_MASK |
1973 ENET_QOS_MAC_PMT_CONTROL_STATUS_PWRDWN_MASK;
1974
1975 /* Enable the MAC rx. */
1976 base->MAC_CONFIGURATION |= ENET_QOS_MAC_CONFIGURATION_RE_MASK;
1977 }
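/*
 * Example usage (illustrative sketch): enter the power down mode with a remote wake-up frame
 * filter. The filter must be eight continuous 32-bit words (ENET_QOS_WAKEUPFILTER_NUM); the zeroed
 * pattern below is only a placeholder, real values depend on the wake-up frames the application
 * expects. The ENET_QOS base macro name depends on the device.
 * code
 * uint32_t wakeFilter[ENET_QOS_WAKEUPFILTER_NUM] = {0U};
 * ENET_QOS_EnterPowerDown(ENET_QOS, wakeFilter);
 * endcode
 */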
1978
1979 /*!
1980 * brief Enable/Disable the Rx parser. Note that before enabling or disabling the Rx parser,
1981 * the MAC receive should be disabled first.
1982 *
1983 * param base ENET_QOS peripheral base address.
1984 * param enable Enable/Disable Rx parser function
1985 */
1986 status_t ENET_QOS_EnableRxParser(ENET_QOS_Type *base, bool enable)
1987 {
1988 status_t result = kStatus_Success;
1989
1990 if (enable)
1991 {
1992 base->MTL_OPERATION_MODE |= ENET_QOS_MTL_OPERATION_MODE_FRPE_MASK;
1993 }
1994 else
1995 {
1996 base->MTL_OPERATION_MODE &= ~ENET_QOS_MTL_OPERATION_MODE_FRPE_MASK;
1997 result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_CONTROL_STATUS), ENET_QOS_MTL_RXP_CONTROL_STATUS_RXPI_MASK,
1998 ENET_QOS_MTL_RXP_CONTROL_STATUS_RXPI_MASK);
1999 }
2000
2001 return result;
2002 }
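/*
 * Example usage (illustrative sketch): following the note above, disable the MAC receive before
 * toggling the Rx parser and restore it afterwards. The ENET_QOS base macro name depends on the
 * device.
 * code
 * ENET_QOS->MAC_CONFIGURATION &= ~ENET_QOS_MAC_CONFIGURATION_RE_MASK;
 * (void)ENET_QOS_EnableRxParser(ENET_QOS, false);
 * ENET_QOS->MAC_CONFIGURATION |= ENET_QOS_MAC_CONFIGURATION_RE_MASK;
 * endcode
 */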
2003
2004 /*!
2005 * brief Gets the size of the read frame.
2006 * This function gets a received frame size from the ENET buffer descriptors.
2007 * note The FCS of the frame is automatically removed by MAC and the size is the length without the FCS.
2008 * After calling ENET_QOS_GetRxFrameSize, ENET_QOS_ReadFrame() should be called to update the
2009 * receive buffers if the result is not "kStatus_ENET_QOS_RxFrameEmpty".
2010 *
 * param base ENET peripheral base address.
2011 * param handle The ENET handler structure. This is the same handler pointer used in the ENET_QOS_Init.
2012 * param length The length of the valid frame received.
2013 * param channel The DMAC channel for the rx.
2014 * retval kStatus_ENET_QOS_RxFrameEmpty No frame received. Should not call ENET_QOS_ReadFrame to read frame.
2015 * retval kStatus_ENET_QOS_RxFrameError Data error happens. ENET_QOS_ReadFrame should be called with NULL data
2016 * and zero length to update the receive buffers.
2017 * retval kStatus_Success A frame was received successfully. ENET_QOS_ReadFrame should then
2018 * be called with the right data buffer and the captured data length input.
2019 */
2020 status_t ENET_QOS_GetRxFrameSize(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint32_t *length, uint8_t channel)
2021 {
2022 assert(handle != NULL);
2023 assert(length != NULL);
2024
2025 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
2026 enet_qos_rx_bd_struct_t *rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2027 uint16_t index = rxBdRing->rxGenIdx;
2028 uint32_t control = rxDesc->control;
2029
2030 /* Reset the length to zero. */
2031 *length = 0;
2032
2033 if ((control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U)
2034 {
2035 return kStatus_ENET_QOS_RxFrameEmpty;
2036 }
2037 else
2038 {
2039 do
2040 {
2041 /* Application owns the buffer descriptor, get the length. */
2042 if ((control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2043 {
2044 if ((control & ENET_QOS_RXDESCRIP_WR_ERRSUM_MASK) != 0U)
2045 {
2046 return kStatus_ENET_QOS_RxFrameError;
2047 }
2048 *length = (control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) - ENET_QOS_FCS_LEN;
2049 return kStatus_Success;
2050 }
2051
2052 index = ENET_QOS_IncreaseIndex(index, rxBdRing->rxRingLen);
2053 rxDesc = &rxBdRing->rxBdBase[index];
2054 control = rxDesc->control;
2055 } while (index != rxBdRing->rxGenIdx);
2056
2057 return kStatus_ENET_QOS_RxFrameError;
2058 }
2059 }
2060
2061 static void ENET_QOS_DropFrame(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint8_t channel)
2062 {
2063 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
2064 enet_qos_rx_bd_struct_t *rxDesc;
2065 uint16_t index = rxBdRing->rxGenIdx;
2066 bool tsAvailable = false;
2067 uintptr_t buff1Addr = 0;
2068 uintptr_t buff2Addr = 0;
2069 uint32_t rxDescTail;
2070 uint32_t rdesc1;
2071 uint32_t rdesc3;
2072
2073 /* Do not check DMA ownership here; assume there is at least one valid frame left in the BD ring */
2074 do
2075 {
2076 /* Get the control flag. */
2077 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2078 rdesc1 = rxDesc->reserved;
2079 rdesc3 = rxDesc->control;
2080
2081 if (!handle->doubleBuffEnable)
2082 {
2083 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2084 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
2085 handle->doubleBuffEnable);
2086 }
2087 else
2088 {
2089 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2090 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2091 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2092 handle->rxintEnable, handle->doubleBuffEnable);
2093 }
2094
2095 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2096
2097 /* Find the last buffer descriptor for the frame. */
2098 if ((rdesc3 & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2099 {
2100 if ((rdesc3 & ENET_QOS_RXDESCRIP_WR_RS1V_MASK) != 0U)
2101 {
2102 if ((rdesc1 & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
2103 {
2104 tsAvailable = true;
2105 }
2106 }
2107
2108 /* Reinit for the context descriptor which has been updated by DMA. */
2109 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2110
2111 if (tsAvailable && ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U))
2112 {
2113 if (!handle->doubleBuffEnable)
2114 {
2115 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2116 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
2117 handle->doubleBuffEnable);
2118 }
2119 else
2120 {
2121 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2122 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2123 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2124 handle->rxintEnable, handle->doubleBuffEnable);
2125 }
2126 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2127 }
2128 break;
2129 }
2130 } while (rxBdRing->rxGenIdx != index);
2131
2132 /* Always try to start receive, in case it had stopped */
2133 rxDescTail = (uint32_t)(uintptr_t)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
2134 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2135 rxDescTail = MEMORY_ConvertMemoryMapAddress(rxDescTail, kMEMORY_Local2DMA);
2136 #endif
2137 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = rxDescTail;
2138 }
2139
2140 /*!
2141 * brief Reads a frame from the ENET device.
2142 * This function reads a frame from the ENET DMA descriptors.
2143 * The ENET_QOS_GetRxFrameSize should be used to get the size of the prepared data buffer.
2144 * For example use rx dma channel 0:
2145 * code
 * status_t status;
2146 * uint32_t length;
2147 * enet_qos_handle_t g_handle;
2148 * enet_qos_ptp_time_t ts;
2149 * status = ENET_QOS_GetRxFrameSize(ENET, &g_handle, &length, 0);
2150 * if (length != 0)
2151 * {
2152 * uint8_t *data = memory allocate interface;
2153 * if (!data)
2154 * {
2155 * ENET_QOS_ReadFrame(ENET, &g_handle, NULL, 0, 0, &ts);
2156 * }
2157 * else
2158 * {
2159 * status = ENET_QOS_ReadFrame(ENET, &g_handle, data, length, 0, &ts);
2160 * }
2161 * }
2162 * else if (status == kStatus_ENET_QOS_RxFrameError)
2163 * {
2164 * ENET_QOS_ReadFrame(ENET, &g_handle, NULL, 0, 0, &ts);
2165 * }
2166 * endcode
2167 * param base ENET peripheral base address.
2168 * param handle The ENET handler structure. This is the same handler pointer used in the ENET_QOS_Init.
2169 * param data The data buffer provided by user to store the frame, whose memory size should be at least "length".
2170 * param length The size of the data buffer, which equals the length of the received frame.
2171 * param channel The rx DMA channel, which shall be smaller than the used rx queue count.
 * param ts Pointer for storing the frame receive timestamp; pass NULL if the timestamp is not needed.
2172 * return The execution status, success or failure.
2173 */
2174 status_t ENET_QOS_ReadFrame(ENET_QOS_Type *base,
2175 enet_qos_handle_t *handle,
2176 uint8_t *data,
2177 uint32_t length,
2178 uint8_t channel,
2179 enet_qos_ptp_time_t *ts)
2180 {
2181 assert(handle != NULL);
2182 assert(channel < handle->rxQueueUse);
2183
2184 uint32_t len = 0;
2185 uint32_t offset = 0;
2186 uint32_t control;
2187 bool isLastBuff = false;
2188 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
2189 enet_qos_rx_bd_struct_t *rxDesc;
2190 status_t result = kStatus_Fail;
2191 uintptr_t buff1Addr = 0; /*!< Buffer 1 address */
2192 uintptr_t buff2Addr = 0; /*!< Buffer 2 or next descriptor address */
2193 uint32_t rxDescTail;
2194
2195 bool tsAvailable = false;
2196
2197 /* For data-NULL input, only update the buffer descriptor. */
2198 if (data == NULL)
2199 {
2200 ENET_QOS_DropFrame(base, handle, channel);
2201 result = kStatus_Success;
2202 }
2203 else
2204 {
2205 while (!isLastBuff)
2206 {
2207 /* The last buffer descriptor of a frame. */
2208 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2209 control = rxDesc->control;
2210
2211 if (!handle->doubleBuffEnable)
2212 {
2213 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2214 if (handle->rxMaintainEnable[channel])
2215 {
2216 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2217 /* Add the cache invalidate maintain. */
2218 ENET_QOS_DcacheInvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2219 rxBdRing->rxBuffSizeAlign);
2220 #else
2221 /* Add the cache invalidate maintain. */
2222 ENET_QOS_DcacheInvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
2223 #endif
2224 }
2225 }
2226 else
2227 {
2228 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2229 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2230 if (handle->rxMaintainEnable[channel])
2231 {
2232 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2233 /* Add the cache invalidate maintain. */
2234 ENET_QOS_DcacheInvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2235 rxBdRing->rxBuffSizeAlign);
2236 /* Add the cache invalidate maintain. */
2237 ENET_QOS_DcacheInvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
2238 rxBdRing->rxBuffSizeAlign);
2239 #else
2240 /* Add the cache invalidate maintain. */
2241 ENET_QOS_DcacheInvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
2242 /* Add the cache invalidate maintain. */
2243 ENET_QOS_DcacheInvalidateByRange(buff2Addr, rxBdRing->rxBuffSizeAlign);
2244 #endif
2245 }
2246 }
2247
2248 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2249
2250 if ((control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2251 {
2252 /* This is a valid frame. */
2253 isLastBuff = true;
2254
2255 /* Remove FCS */
2256 len = (control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) - ENET_QOS_FCS_LEN;
2257
2258 if (length == len)
2259 {
2260 /* Copy the frame to user's buffer. */
2261 len -= offset;
2262
2263 if (len > rxBdRing->rxBuffSizeAlign)
2264 {
2265 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2266 (void)memcpy((void *)&data[offset],
2267 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2268 rxBdRing->rxBuffSizeAlign);
2269 #else
2270 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, rxBdRing->rxBuffSizeAlign);
2271 #endif
2272 offset += rxBdRing->rxBuffSizeAlign;
2273 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2274 (void)memcpy((void *)&data[offset],
2275 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
2276 len - rxBdRing->rxBuffSizeAlign);
2277 #else
2278 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff2Addr,
2279 len - rxBdRing->rxBuffSizeAlign);
2280 #endif
2281 }
2282 else
2283 {
2284 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2285 (void)memcpy((void *)&data[offset],
2286 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2287 len);
2288 #else
2289 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, len);
2290 #endif
2291 }
2292
2293 result = kStatus_Success;
2294 }
2295
2296 if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
2297 {
2298 tsAvailable = true;
2299 }
2300 /* Updates the receive buffer descriptors. */
2301 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2302 handle->rxintEnable, handle->doubleBuffEnable);
2303 /* Store the rx timestamp which is in the next buffer descriptor of the last
2304 * descriptor of a frame. */
2305 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2306 control = rxDesc->control;
2307
2308 /* If tsAvailable is true, a context descriptor is expected but might not yet be
2309 * available.
2310 */
2311 if (tsAvailable)
2312 {
2313 uint8_t retryTimes = 10;
2314
2315 while (((control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U) ||
2316 ((control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) == 0U))
2317 {
2318 SDK_DelayAtLeastUs(1U, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
2319 if (0U == retryTimes--)
2320 {
2321 assert(false);
2322 }
2323 control = rxDesc->control;
2324 }
2325
2326 /* Reinit for the context descriptor which has been updated by DMA. */
2327 if (NULL != ts)
2328 {
2329 ENET_QOS_StoreRxFrameTime(base, handle, rxDesc, ts);
2330 }
2331
2332 if (!handle->doubleBuffEnable)
2333 {
2334 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2335 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
2336 handle->doubleBuffEnable);
2337 }
2338 else
2339 {
2340 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2341 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2342 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2343 handle->rxintEnable, handle->doubleBuffEnable);
2344 }
2345 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2346 }
2347 }
2348 else
2349 {
2350 /* Store a frame on several buffer descriptors. */
2351 isLastBuff = false;
2352 /* Length check. */
2353 if (offset >= length)
2354 {
2355 /* Updates the receive buffer descriptors. */
2356 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2357 handle->rxintEnable, handle->doubleBuffEnable);
2358 break;
2359 }
2360
2361 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2362 (void)memcpy((void *)&data[offset],
2363 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2364 rxBdRing->rxBuffSizeAlign);
2365 #else
2366 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, rxBdRing->rxBuffSizeAlign);
2367 #endif
2368
2369 offset += rxBdRing->rxBuffSizeAlign;
2370 if (buff2Addr != 0U)
2371 {
2372 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2373 (void)memcpy((void *)&data[offset],
2374 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
2375 rxBdRing->rxBuffSizeAlign);
2376 #else
2377 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff2Addr, rxBdRing->rxBuffSizeAlign);
2378 #endif
2379 offset += rxBdRing->rxBuffSizeAlign;
2380 }
2381
2382 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2383 handle->rxintEnable, handle->doubleBuffEnable);
2384 }
2385 }
2386
2387 /* Always try to start receive, in case it had stopped */
2388 rxDescTail = (uint32_t)(uintptr_t)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
2389 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2390 rxDescTail = MEMORY_ConvertMemoryMapAddress(rxDescTail, kMEMORY_Local2DMA);
2391 #endif
2392 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = rxDescTail;
2393 }
2394
2395 return result;
2396 }
2397
2398 /*!
2399 * brief Updates the buffers and the own status for a given rx descriptor.
2400 * This function is a low level functional API to update the
2401 * buffers and the own status for a given rx descriptor.
2402 *
2403 * param rxDesc The given rx descriptor.
2404 * param buffer1 The first buffer address in the descriptor.
2405 * param buffer2 The second buffer address in the descriptor.
2406 * param intEnable Interrupt enable flag.
2407 * param doubleBuffEnable The double buffer enable flag.
2408 *
2409 * note This must be called after all the ENET initialization.
2410 * And should be called when the ENET receive/transmit is required.
2411 */
2412 void ENET_QOS_UpdateRxDescriptor(
2413 enet_qos_rx_bd_struct_t *rxDesc, void *buffer1, void *buffer2, bool intEnable, bool doubleBuffEnable)
2414 {
2415 assert(rxDesc != NULL);
2416 uint32_t control = ENET_QOS_RXDESCRIP_RD_OWN_MASK | ENET_QOS_RXDESCRIP_RD_BUFF1VALID_MASK;
2417
2418 if (intEnable)
2419 {
2420 control |= ENET_QOS_RXDESCRIP_RD_IOC_MASK;
2421 }
2422
2423 if (doubleBuffEnable)
2424 {
2425 control |= ENET_QOS_RXDESCRIP_RD_BUFF2VALID_MASK;
2426 }
2427
2428 /* Update the buffer if needed. */
2429 if (buffer1 != NULL)
2430 {
2431 rxDesc->buff1Addr = (uint32_t)(uintptr_t)(uint8_t *)buffer1;
2432 }
2433 if (buffer2 != NULL)
2434 {
2435 rxDesc->buff2Addr = (uint32_t)(uintptr_t)(uint8_t *)buffer2;
2436 }
2437 else
2438 {
2439 rxDesc->buff2Addr = 0;
2440 }
2441
2442 rxDesc->reserved = 0;
2443
2444 /* Add a data barrier to be sure that the address is written before the
2445 ownership bit status. */
2446 __DMB();
2447
2448 rxDesc->control = control;
2449 }
2450
2451 /*!
2452 * brief Setup a given tx descriptor.
2453 * This function is a low level functional API to setup or prepare
2454 * a given tx descriptor.
2455 *
2456 * param txDesc The given tx descriptor.
2457 * param buffer1 The first buffer address in the descriptor.
2458 * param bytes1 The bytes in the first buffer.
2459 * param buffer2 The second buffer address in the descriptor.
2460 * param bytes2 The bytes in the second buffer.
2461 * param framelen The length of the frame to be transmitted.
2462 * param intEnable Interrupt enable flag.
2463 * param tsEnable The timestamp enable.
2464 * param flag The flag of this tx descriptor, see "enet_qos_desc_flag".
2465 * param slotNum The slot num used for AV only.
2466 *
2467 * note This must be called after all the ENET initialization.
2468 * And should be called when the ENET receive/transmit is required.
2469 * Transmit buffers are 'zero-copy' buffers, so the buffer must remain in
2470 * memory until the packet has been fully transmitted. The buffers
2471 * should be freed or requeued in the transmit interrupt IRQ handler.
2472 */
2473 void ENET_QOS_SetupTxDescriptor(enet_qos_tx_bd_struct_t *txDesc,
2474 void *buffer1,
2475 uint32_t bytes1,
2476 void *buffer2,
2477 uint32_t bytes2,
2478 uint32_t framelen,
2479 bool intEnable,
2480 bool tsEnable,
2481 enet_qos_desc_flag flag,
2482 uint8_t slotNum)
2483 {
2484 uint32_t control = ENET_QOS_TXDESCRIP_RD_BL1(bytes1) | ENET_QOS_TXDESCRIP_RD_BL2(bytes2);
2485
2486 if (tsEnable)
2487 {
2488 control |= ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
2489 }
2490 else
2491 {
2492 control &= ~ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
2493 }
2494
2495 if (intEnable)
2496 {
2497 control |= ENET_QOS_TXDESCRIP_RD_IOC_MASK;
2498 }
2499 else
2500 {
2501 control &= ~ENET_QOS_TXDESCRIP_RD_IOC_MASK;
2502 }
2503
2504 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2505 buffer1 = (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)buffer1, kMEMORY_Local2DMA);
2506 buffer2 = (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)buffer2, kMEMORY_Local2DMA);
2507 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
2508
2509 /* Prepare the descriptor for transmit. */
2510 txDesc->buff1Addr = (uint32_t)(uintptr_t)(uint8_t *)buffer1;
2511 txDesc->buff2Addr = (uint32_t)(uintptr_t)(uint8_t *)buffer2;
2512 txDesc->buffLen = control;
2513
2514 /* Make sure all fields of descriptor are written before setting ownership */
2515 __DMB();
2516
2517 control = ENET_QOS_TXDESCRIP_RD_FL(framelen) | ENET_QOS_TXDESCRIP_RD_LDFD(flag) | ENET_QOS_TXDESCRIP_RD_OWN_MASK;
2518
2519 txDesc->controlStat = control;
2520
2521 /* Make sure the descriptor is written in memory (before MAC starts checking it) */
2522 __DSB();
2523 }
2524
2525 /*!
2526 * brief Configure a given tx descriptor.
2527 * This function is a low level functional API to setup or prepare
2528 * a given tx descriptor.
2529 *
2530 * param txDesc The given tx descriptor.
2531 * param config The tx descriptor configuration.
2532 *
2533 * note This must be called after all the ENET initialization.
2534 * And should be called when the ENET receive/transmit is required.
2535 * Transmit buffers are 'zero-copy' buffers, so the buffer must remain in
2536 * memory until the packet has been fully transmitted. The buffers
2537 * should be freed or requeued in the transmit interrupt IRQ handler.
2538 */
2539 static void ENET_QOS_ConfigTxDescriptor(enet_qos_tx_bd_struct_t *txDesc, enet_qos_tx_bd_config_struct_t *config)
2540 {
2541 uint32_t control = ENET_QOS_TXDESCRIP_RD_BL1(config->bytes1) | ENET_QOS_TXDESCRIP_RD_BL2(config->bytes2);
2542
2543 if (config->tsEnable)
2544 {
2545 control |= ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
2546 }
2547 else
2548 {
2549 control &= ~ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
2550 }
2551
2552 if (config->intEnable)
2553 {
2554 control |= ENET_QOS_TXDESCRIP_RD_IOC_MASK;
2555 }
2556 else
2557 {
2558 control &= ~ENET_QOS_TXDESCRIP_RD_IOC_MASK;
2559 }
2560
2561 /* Prepare the descriptor for transmit. */
2562 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2563 txDesc->buff1Addr = MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)config->buffer1, kMEMORY_Local2DMA);
2564 txDesc->buff2Addr = MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)config->buffer2, kMEMORY_Local2DMA);
2565 #else
2566 txDesc->buff1Addr = (uint32_t)(uintptr_t)(uint8_t *)config->buffer1;
2567 txDesc->buff2Addr = (uint32_t)(uintptr_t)(uint8_t *)config->buffer2;
2568 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
2569 txDesc->buffLen = control;
2570
2571 /* Make sure all fields of descriptor are written before setting ownership */
2572 __DMB();
2573
2574 control = ENET_QOS_TXDESCRIP_RD_FL(config->framelen) |
2575 ENET_QOS_TXDESCRIP_RD_CIC(config->txOffloadOps) | ENET_QOS_TXDESCRIP_RD_LDFD(config->flag) |
2576 ENET_QOS_TXDESCRIP_RD_OWN_MASK;
2577
2578 txDesc->controlStat = control;
2579
2580 /* Make sure the descriptor is written in memory (before MAC starts checking it) */
2581 __DSB();
2582 }
2583
2584 /*!
2585 * brief Reclaim tx descriptors.
2586 * This function is used to update the tx descriptor status and
2587 * store the tx timestamp when the 1588 feature is enabled.
2588 * This is called by the transmit interrupt IRQ handler after the
2589 * completion of a frame transmission.
2590 *
2591 * param base ENET peripheral base address.
2592 * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_QOS_Init.
2593 * param channel The tx DMA channel.
2594 *
2595 */
2596 void ENET_QOS_ReclaimTxDescriptor(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint8_t channel)
2597 {
2598 enet_qos_tx_bd_ring_t *txBdRing = &handle->txBdRing[channel];
2599 enet_qos_tx_bd_struct_t *txDesc = &txBdRing->txBdBase[txBdRing->txConsumIdx];
2600 enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
2601 enet_qos_frame_info_t *txDirty = NULL;
2602 uint32_t control, primask;
2603
2604 control = txDesc->controlStat;
2605
2606 /* Need to update the first index for transmit buffer free. */
2607 while ((txBdRing->txDescUsed > 0U) && (0U == (control & ENET_QOS_TXDESCRIP_RD_OWN_MASK)))
2608 {
2609 if ((control & ENET_QOS_TXDESCRIP_RD_LD_MASK) != 0U)
2610 {
2611 if (ENET_QOS_TxDirtyRingAvailable(txDirtyRing))
2612 {
2613 txDirty = &txDirtyRing->txDirtyBase[txBdRing->txConsumIdx];
2614 txDirtyRing->txGenIdx = ENET_QOS_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
2615 if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
2616 {
2617 txDirtyRing->isFull = true;
2618 }
2619
2620 if ((control & ENET_QOS_TXDESCRIP_WB_TTSS_MASK) != 0U)
2621 {
2622 enet_qos_ptp_time_t *ts = &txDirty->timeStamp;
2623 uint32_t nanosecond;
2624 /* Get transmit time stamp second. */
2625 nanosecond = txDesc->buff1Addr;
2626 txDirty->isTsAvail = true;
2627 if (0U == (base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK))
2628 {
2629 /* Binary rollover, 0.465ns accuracy. */
2630 nanosecond = (nanosecond * 465U) / 1000U;
2631 }
2632 ts->second = txDesc->buff2Addr;
2633 ts->nanosecond = nanosecond;
2634 }
2635 else
2636 {
2637 txDirty->isTsAvail = false;
2638 }
2639 }
2640 }
2641
2642 /* For tx buffer free or requeue for each descriptor.
2643 * The tx interrupt callback should free/requeue the tx buffer. */
2644 if (handle->callback != NULL)
2645 {
2646 handle->callback(base, handle, kENET_QOS_TxIntEvent, channel, handle->userData);
2647 }
2648
2649 primask = DisableGlobalIRQ();
2650 txBdRing->txDescUsed--;
2651 EnableGlobalIRQ(primask);
2652
2653 /* Update the txConsumIdx/txDesc. */
2654 txBdRing->txConsumIdx = ENET_QOS_IncreaseIndex(txBdRing->txConsumIdx, txBdRing->txRingLen);
2655 txDesc = &txBdRing->txBdBase[txBdRing->txConsumIdx];
2656 control = txDesc->controlStat;
2657 }
2658 }
2659
2660 /*!
2661 * brief Transmits an ENET frame.
2662 * note The CRC is automatically appended to the data. Input the data
2663 * to send without the CRC.
2664 *
2665 * param base ENET peripheral base address.
2666 * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_QOS_Init.
2667 * param data The data buffer provided by user to be sent.
2668 * param length The length of the data to be sent.
2669 * param channel Channel to send the frame, same as the queue index.
2670 * param isNeedTs True means to save the frame transmit timestamp.
2671 * param context Pointer to a user context to be kept in the tx dirty frame information.
2672 * param txOffloadOps The Tx frame checksum offload option.
2673 * retval kStatus_Success Send frame succeed.
2674 * retval kStatus_ENET_QOS_TxFrameBusy Transmit buffer descriptor is busy under transmission.
2675 * The transmit busy happens when the data send rate is over the MAC capacity.
2676 * The waiting mechanism is recommended to be added after each call return with
2677 * kStatus_ENET_QOS_TxFrameBusy.
2678 */
2679 status_t ENET_QOS_SendFrame(ENET_QOS_Type *base,
2680 enet_qos_handle_t *handle,
2681 uint8_t *data,
2682 uint32_t length,
2683 uint8_t channel,
2684 bool isNeedTs,
2685 void *context,
2686 enet_qos_tx_offload_t txOffloadOps)
2687 {
2688 assert(handle != NULL);
2689 assert(data != NULL);
2690 assert(channel < handle->txQueueUse);
2691
2692 enet_qos_tx_bd_config_struct_t txDescConfig;
2693 enet_qos_tx_bd_ring_t *txBdRing;
2694 enet_qos_tx_bd_struct_t *txDesc;
2695 enet_qos_tx_dirty_ring_t *txDirtyRing;
2696 enet_qos_frame_info_t *txDirty;
2697 uint32_t primask;
2698 uint32_t txDescTail;
2699
2700 if (txOffloadOps != kENET_QOS_TxOffloadDisable)
2701 {
2702 assert(((uint32_t)FSL_FEATURE_ENET_QOS_TX_OFFLOAD_QUEUE_SUPPORT_BITMAP & ((uint32_t)1U << channel)) != 0U);
2703 }
2704
2705 if (length > 2U * ENET_QOS_TXDESCRIP_RD_BL1_MASK)
2706 {
2707 return kStatus_ENET_QOS_TxFrameOverLen;
2708 }
2709
2710 /* Check if the DMA owns the descriptor. */
2711 txBdRing = (enet_qos_tx_bd_ring_t *)&handle->txBdRing[channel];
2712 txDesc = &txBdRing->txBdBase[txBdRing->txGenIdx];
2713 if (txBdRing->txRingLen == txBdRing->txDescUsed)
2714 {
2715 return kStatus_ENET_QOS_TxFrameBusy;
2716 }
2717
2718 txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
2719 txDirty = &txDirtyRing->txDirtyBase[txBdRing->txGenIdx];
2720 txDirty->context = context;
2721
2722 /* Fill the descriptor. */
2723 txDescConfig.framelen = length;
2724 txDescConfig.flag = kENET_QOS_FirstLastFlag;
2725 txDescConfig.intEnable = true;
2726 txDescConfig.tsEnable = isNeedTs;
2727 txDescConfig.txOffloadOps = txOffloadOps;
2728
2729 if (length <= ENET_QOS_TXDESCRIP_RD_BL1_MASK)
2730 {
2731 txDescConfig.buffer1 = data;
2732 txDescConfig.bytes1 = length;
2733 txDescConfig.buffer2 = NULL;
2734 txDescConfig.bytes2 = 0;
2735 }
2736 else
2737 {
2738 txDescConfig.buffer1 = data;
2739 txDescConfig.bytes1 = ENET_QOS_TXDESCRIP_RD_BL1_MASK;
2740 txDescConfig.buffer2 = &data[ENET_QOS_TXDESCRIP_RD_BL1_MASK];
2741 txDescConfig.bytes2 = length - ENET_QOS_TXDESCRIP_RD_BL1_MASK;
2742 }
2743 ENET_QOS_ConfigTxDescriptor(txDesc, &txDescConfig);
2744
2745 /* Increase the index. */
2746 txBdRing->txGenIdx = ENET_QOS_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
2747 /* Protect the used descriptor counter update from the tx interrupt to avoid a race condition. */
2748 primask = DisableGlobalIRQ();
2749 txBdRing->txDescUsed++;
2750 EnableGlobalIRQ(primask);
2751
2752 /* Update the transmit tail address. */
2753 txDesc = &txBdRing->txBdBase[txBdRing->txGenIdx];
2754 if (txBdRing->txGenIdx == 0U)
2755 {
2756 txDesc = &txBdRing->txBdBase[txBdRing->txRingLen];
2757 }
2758 txDescTail = (uint32_t)(uintptr_t)txDesc & ~ENET_QOS_ADDR_ALIGNMENT;
2759 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2760 txDescTail = MEMORY_ConvertMemoryMapAddress(txDescTail, kMEMORY_Local2DMA);
2761 #endif
2762 base->DMA_CH[channel].DMA_CHX_TXDESC_TAIL_PTR = txDescTail;
2763
2764 return kStatus_Success;
2765 }
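/*
 * Example usage (illustrative sketch): send one frame on channel 0 without checksum offload and
 * retry while the descriptor ring is busy, as recommended above. g_handle, txBuffer and txLength
 * are application-provided; because transmission is zero-copy, the buffer must stay valid until
 * the tx interrupt reports completion.
 * code
 * status_t status;
 * do
 * {
 *     status = ENET_QOS_SendFrame(ENET_QOS, &g_handle, txBuffer, txLength, 0U, false, txBuffer,
 *                                 kENET_QOS_TxOffloadDisable);
 * } while (status == kStatus_ENET_QOS_TxFrameBusy);
 * endcode
 */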
2766
2767 /*!
2768 * brief Gets the sent frame.
2769 *
2770 * This function is used to get the sent frame for timestamp and buffer clean operation.
2771 *
2772 * param handle The ENET handler pointer.This is the same state pointer used in
2773 * ENET_QOS_Init.
2774 * param txFrame Input parameter, pointer to enet_qos_frame_info_t for saving read out frame information.
2775 * param channel Read out frame from specified channel.
2776 */
2777 void ENET_QOS_GetTxFrame(enet_qos_handle_t *handle, enet_qos_frame_info_t *txFrame, uint8_t channel)
2778 {
2779 assert(handle != NULL);
2780 assert(channel < handle->txQueueUse);
2781
2782 enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
2783 enet_qos_frame_info_t *txDirty = &txDirtyRing->txDirtyBase[txDirtyRing->txConsumIdx];
2784
2785 (void)memcpy(txFrame, txDirty, sizeof(enet_qos_frame_info_t));
2786
2787 txDirtyRing->isFull = false;
2788 txDirtyRing->txConsumIdx = ENET_QOS_IncreaseIndex(txDirtyRing->txConsumIdx, txDirtyRing->txRingLen);
2789 }
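/*
 * Example usage (illustrative sketch): inside a kENET_QOS_TxIntEvent callback, fetch the sent frame
 * information, release the zero-copy buffer kept in the context field, and read the optional
 * transmit timestamp. APP_FreeTxBuffer is an assumed application helper; handle and channel are the
 * callback arguments.
 * code
 * enet_qos_frame_info_t txFrame;
 * ENET_QOS_GetTxFrame(handle, &txFrame, channel);
 * APP_FreeTxBuffer(txFrame.context);
 * if (txFrame.isTsAvail)
 * {
 *     enet_qos_ptp_time_t txTime = txFrame.timeStamp;
 *     (void)txTime;
 * }
 * endcode
 */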
2790
2791 static inline void ENET_QOS_GetRxFrameErr(enet_qos_rx_bd_struct_t *rxDesc, enet_qos_rx_frame_error_t *rxFrameError)
2792 {
2793 uint32_t rdes2 = rxDesc->buff2Addr;
2794 uint32_t rdes3 = rxDesc->control;
2795
2796 (void)memset(rxFrameError, 0, sizeof(enet_qos_rx_frame_error_t));
2797
2798 if ((rdes2 & ENET_QOS_RXDESCRIP_WR_SA_FAILURE_MASK) != 0U)
2799 {
2800 rxFrameError->rxSrcAddrFilterErr = true;
2801 }
2802 if ((rdes2 & ENET_QOS_RXDESCRIP_WR_DA_FAILURE_MASK) != 0U)
2803 {
2804 rxFrameError->rxDstAddrFilterErr = true;
2805 }
2806 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_DE_MASK) != 0U)
2807 {
2808 rxFrameError->rxDstAddrFilterErr = true;
2809 }
2810 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_RE_MASK) != 0U)
2811 {
2812 rxFrameError->rxReceiveErr = true;
2813 }
2814 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_OE_MASK) != 0U)
2815 {
2816 rxFrameError->rxOverFlowErr = true;
2817 }
2818 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_RWT_MASK) != 0U)
2819 {
2820 rxFrameError->rxWatchDogErr = true;
2821 }
2822 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_GP_MASK) != 0U)
2823 {
2824 rxFrameError->rxGaintPacketErr = true;
2825 }
2826 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_CRC_MASK) != 0U)
2827 {
2828 rxFrameError->rxCrcErr = true;
2829 }
2830 }
2831
2832 /*!
2833 * brief Receives one frame in specified BD ring with zero copy.
2834 *
2835 * This function uses the user-defined allocate and free callbacks. Every time the application gets one frame through
2836 * this function, the driver allocates new buffers for the BDs whose buffers have been taken by the application.
2837 * note This function drops the current frame and updates the related BDs as available for DMA if allocating new buffers
2838 * fails. The application must provide a memory pool including at least BD number + 1 buffers (+2 if double buffer is enabled)
2839 * to make this function work normally. If the user calls this function in the Rx interrupt handler, be aware that this
2840 * function makes the Rx BD ready by allocating a new buffer (normal) or by updating the current BD (out of memory). If there is
2841 * always a new Rx frame input, the Rx interrupt will be triggered forever. The application needs to disable the Rx interrupt
2842 * according to the specific design in this case.
2843 *
2844 * param base ENET peripheral base address.
2845 * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_Init.
2846 * param rxFrame The received frame information structure provided by user.
2847 * param channel The Rx DMA channel (the ring index/ring number).
2848 * retval kStatus_Success Succeed to get one frame and allocate new memory for Rx buffer.
2849 * retval kStatus_ENET_QOS_RxFrameEmpty There's no Rx frame in the BD.
2850 * retval kStatus_ENET_QOS_RxFrameError There's issue in this receiving.
2851 * retval kStatus_ENET_QOS_RxFrameDrop There's no new buffer memory for BD, drop this frame.
2852 */
2853 status_t ENET_QOS_GetRxFrame(ENET_QOS_Type *base,
2854 enet_qos_handle_t *handle,
2855 enet_qos_rx_frame_struct_t *rxFrame,
2856 uint8_t channel)
2857 {
2858 assert(handle != NULL);
2859 assert(channel < handle->rxQueueUse);
2860
2861 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
2862 enet_qos_rx_bd_struct_t *rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2863 uint16_t index = rxBdRing->rxGenIdx;
2864 status_t result = kStatus_Success;
2865 uintptr_t buff1Addr = 0;
2866 uintptr_t buff2Addr = 0;
2867 uint16_t buff1Len = 0;
2868 uint16_t buff2Len = 0;
2869 uint16_t offset = 0;
2870 void *newBuff1 = NULL;
2871 void *newBuff2 = NULL;
2872 bool isDrop = false;
2873 bool isLastBuff = false;
2874 bool tsAvailable = false;
2875 uint32_t rxDescTail;
2876
2877 /* Check the frame status. */
2878 do
2879 {
2880 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U)
2881 {
2882 result = kStatus_ENET_QOS_RxFrameEmpty;
2883 break;
2884 }
2885
2886 /* Check timestamp and error. */
2887 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2888 {
2889 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_RS1V_MASK) != 0U)
2890 {
2891 if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
2892 {
2893 /* A context descriptor is expected but might not yet be available. */
2894 uint8_t retryTimes = 10;
2895
2896 while (((rxDesc->control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U) ||
2897 ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) == 0U))
2898 {
2899 /* Timestamp value is not corrupted. */
2900 if ((rxDesc->buff1Addr != 0xFFFFFFFFU) && (rxDesc->buff2Addr != 0xFFFFFFFFU))
2901 {
2902 break;
2903 }
2904 if (retryTimes-- == 0U)
2905 {
2906 break;
2907 }
2908 }
2909
2910 if (retryTimes != 0U)
2911 {
2912 tsAvailable = true;
2913 }
2914 else
2915 {
2916 result = kStatus_ENET_QOS_RxFrameEmpty;
2917 break;
2918 }
2919 }
2920 }
2921
2922 /* Get the frame error if there is. */
2923 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_ERRSUM_MASK) != 0U)
2924 {
2925 ENET_QOS_GetRxFrameErr(rxDesc, &rxFrame->rxFrameError);
2926 result = kStatus_ENET_QOS_RxFrameError;
2927 }
2928 else if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) == 0U)
2929 {
2930 result = kStatus_ENET_QOS_RxFrameEmpty;
2931 }
2932 else
2933 {
2934 /* Intentional empty */
2935 }
2936 break;
2937 }
2938
2939 index = ENET_QOS_IncreaseIndex(index, rxBdRing->rxRingLen);
2940 if (index == rxBdRing->rxGenIdx)
2941 {
2942 result = kStatus_ENET_QOS_RxFrameEmpty;
2943 break;
2944 }
2945 rxDesc = &rxBdRing->rxBdBase[index];
2946 } while (index != rxBdRing->rxGenIdx);
2947
2948 /* Drop the error frame and return error. */
2949 if (result != kStatus_Success)
2950 {
2951 if (result == kStatus_ENET_QOS_RxFrameError)
2952 {
2953 ENET_QOS_DropFrame(base, handle, channel);
2954 }
2955 return result;
2956 }
2957
2958 /* Get the valid frame */
2959 index = 0;
2960 do
2961 {
2962 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2963
2964 /* Calculate the buffer and frame length. */
2965 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2966 {
2967 isLastBuff = true;
2968 rxFrame->totLen = (uint16_t)(rxDesc->control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK);
2969
2970 if (rxFrame->totLen - offset > (uint16_t)rxBdRing->rxBuffSizeAlign)
2971 {
2972 buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2973 if (handle->doubleBuffEnable)
2974 {
2975 buff2Len = rxFrame->totLen - offset - (uint16_t)rxBdRing->rxBuffSizeAlign - ENET_QOS_FCS_LEN;
2976 }
2977 }
2978 else
2979 {
2980 buff1Len = rxFrame->totLen - offset - ENET_QOS_FCS_LEN;
2981 }
2982 rxFrame->totLen -= ENET_QOS_FCS_LEN;
2983 }
2984 else
2985 {
2986 if (!handle->doubleBuffEnable)
2987 {
2988 buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2989 offset += buff1Len;
2990 }
2991 else
2992 {
2993 buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2994 buff2Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2995 offset += buff1Len + buff2Len;
2996 }
2997 }
2998
2999 /* Allocate new buffer to replace the buffer taken by application */
3000 newBuff1 = handle->rxBuffAlloc(base, handle->userData, channel);
3001 if (newBuff1 == NULL)
3002 {
3003 isDrop = true;
3004 }
3005 else if (handle->doubleBuffEnable && (buff2Len != 0U))
3006 {
3007 newBuff2 = handle->rxBuffAlloc(base, handle->userData, channel);
3008 if (newBuff2 == NULL)
3009 {
3010 handle->rxBuffFree(base, newBuff1, handle->userData, channel);
3011 isDrop = true;
3012 }
3013 }
3014 else
3015 {
3016 /* Intentional empty */
3017 }
3018
3019 if (!isDrop)
3020 {
3021 /* Get the frame data information into Rx frame structure. */
3022 if (!handle->doubleBuffEnable)
3023 {
3024 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
3025 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
3026 buff1Addr = MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local);
3027 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
3028 if (handle->rxMaintainEnable[channel])
3029 {
3030 ENET_QOS_DcacheInvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
3031 }
3032 rxFrame->rxBuffArray[index].buffer = (void *)(uint8_t *)buff1Addr;
3033 rxFrame->rxBuffArray[index].length = buff1Len;
3034 index++;
3035 }
3036 else
3037 {
3038 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
3039 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
3040 buff1Addr = MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local);
3041 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
3042 if (handle->rxMaintainEnable[channel])
3043 {
3044 ENET_QOS_DcacheInvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
3045 }
3046 rxFrame->rxBuffArray[index].buffer = (void *)(uint8_t *)buff1Addr;
3047 rxFrame->rxBuffArray[index].length = buff1Len;
3048 index++;
3049
3050 /* If there's no data in buffer 2, do not add it to rxFrame */
3051 if (buff2Len != 0U)
3052 {
3053 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
3054 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
3055 buff2Addr = MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local);
3056 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
3057 if (handle->rxMaintainEnable[channel])
3058 {
3059 ENET_QOS_DcacheInvalidateByRange(buff2Addr, rxBdRing->rxBuffSizeAlign);
3060 }
3061 rxFrame->rxBuffArray[index].buffer = (void *)(uint8_t *)buff2Addr;
3062 rxFrame->rxBuffArray[index].length = buff2Len;
3063 index++;
3064 }
3065 }
3066
3067 /* Give new buffer from application to BD */
3068 if (!handle->doubleBuffEnable)
3069 {
3070 if (handle->rxMaintainEnable[channel])
3071 {
3072 ENET_QOS_DcacheInvalidateByRange((uintptr_t)(uint8_t *)newBuff1, rxBdRing->rxBuffSizeAlign);
3073 }
3074 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
3075 buff1Addr = MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)newBuff1, kMEMORY_Local2DMA);
3076 #else
3077 buff1Addr = (uintptr_t)(uint8_t *)newBuff1;
3078 #endif
3079 handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx] = buff1Addr;
3080 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
3081 handle->doubleBuffEnable);
3082 }
3083 else
3084 {
3085 if (handle->rxMaintainEnable[channel])
3086 {
3087 ENET_QOS_DcacheInvalidateByRange((uintptr_t)(uint8_t *)newBuff1, rxBdRing->rxBuffSizeAlign);
3088 }
3089 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
3090 buff1Addr = MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)newBuff1, kMEMORY_Local2DMA);
3091 #else
3092 buff1Addr = (uintptr_t)(uint8_t *)newBuff1;
3093 #endif
3094 handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx] = buff1Addr;
3095
3096 if (buff2Len != 0U)
3097 {
3098 if (handle->rxMaintainEnable[channel])
3099 {
3100 ENET_QOS_DcacheInvalidateByRange((uintptr_t)(uint8_t *)newBuff2, rxBdRing->rxBuffSizeAlign);
3101 }
3102 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
3103 buff2Addr =
3104 (uint32_t)MEMORY_ConvertMemoryMapAddress((uintptr_t)(uint8_t *)newBuff2, kMEMORY_Local2DMA);
3105 #else
3106 buff2Addr = (uintptr_t)(uint8_t *)newBuff2;
3107 #endif
3108 handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U] = buff2Addr;
3109 }
3110 else
3111 {
3112                 /* If there's no data in buffer 2, keep the existing buffer. */
3113 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
3114 }
3115
3116 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
3117 handle->rxintEnable, handle->doubleBuffEnable);
3118 }
3119 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
3120
3121         /* Update the context BD if there is one. */
3122 if (isLastBuff && tsAvailable)
3123 {
3124 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
3125 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U)
3126 {
3127 ENET_QOS_StoreRxFrameTime(base, handle, rxDesc, &rxFrame->rxAttribute.timestamp);
3128 rxFrame->rxAttribute.isTsAvail = true;
3129
3130 if (!handle->doubleBuffEnable)
3131 {
3132 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
3133 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
3134 handle->doubleBuffEnable);
3135 }
3136 else
3137 {
3138 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
3139 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
3140 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
3141 handle->rxintEnable, handle->doubleBuffEnable);
3142 }
3143 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
3144 }
3145 }
3146         /* Always try to restart reception in case it has stopped. */
3147 rxDescTail = (uint32_t)(uintptr_t)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
3148 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
3149 rxDescTail = MEMORY_ConvertMemoryMapAddress(rxDescTail, kMEMORY_Local2DMA);
3150 #endif
3151 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = rxDescTail;
3152 }
3153 else
3154 {
3155 /* Drop frame if there's no new buffer memory */
3156
3157 /* Free the incomplete frame buffers. */
3158 while (index-- != 0U)
3159 {
3160 handle->rxBuffFree(base, rxFrame->rxBuffArray[index].buffer, handle->userData, channel);
3161 }
3162
3163             /* Update all remaining BDs of this frame from the current index. */
3164 ENET_QOS_DropFrame(base, handle, channel);
3165
3166 result = kStatus_ENET_QOS_RxFrameDrop;
3167 break;
3168 }
3169 } while (!isLastBuff);
3170
3171 return result;
3172 }
3173
3174 /*!
3175  * brief Gets the current ENET time from the PTP 1588 timer without disabling IRQs.
3176 *
3177 * param base ENET peripheral base address.
3178 * param second The PTP 1588 system timer second.
3179 * param nanosecond The PTP 1588 system timer nanosecond.
3180  *        The unit of the nanosecond is 1 ns, so this is the actual nanosecond value.
3181 */
3182 void ENET_QOS_Ptp1588GetTimerNoIRQDisable(ENET_QOS_Type *base, uint64_t *second, uint32_t *nanosecond)
3183 {
3184 assert(second != NULL);
3185 assert(nanosecond != NULL);
3186
3187 uint32_t high_sec[2];
3188 uint32_t sec[2];
3189
3190 /* Get the current PTP time. */
3191 /* Since register reads are not atomic, we need to check for wraps during the read */
3192 high_sec[1] = base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS & ENET_QOS_MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS_TSHWR_MASK;
3193
3194 do
3195 {
3196 high_sec[0] = high_sec[1];
3197
3198 sec[1] = base->MAC_SYSTEM_TIME_SECONDS;
3199
3200 do
3201 {
3202 sec[0] = sec[1];
3203 *nanosecond = base->MAC_SYSTEM_TIME_NANOSECONDS & ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_TSSS_MASK;
3204 sec[1] = base->MAC_SYSTEM_TIME_SECONDS;
3205 } while (sec[1] != sec[0]);
3206
3207 high_sec[1] =
3208 base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS & ENET_QOS_MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS_TSHWR_MASK;
3209 } while (high_sec[1] != high_sec[0]);
3210
3211 *second = ((uint64_t)high_sec[1] << 32U) | sec[1];
3212
3213 if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) == 0U)
3214 {
3215 /* Binary rollover, the unit of the increment is ~ 0.465 ns. */
3216 *nanosecond = (*nanosecond * 465U) / 1000U;
3217 }
3218 }
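
/*
 * Worked note (not part of the driver): in binary-rollover mode the sub-second counter holds
 * 2^31 units per second, so one LSB is 10^9 / 2^31 ~= 0.4657 ns. The conversion above
 * approximates this by multiplying the raw count by 465 and dividing by 1000.
 */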
3219
3220 /*!
3221  * brief Gets the current ENET time from the PTP 1588 timer, returning a more accurate
3222  * value because IRQs are disabled while the timer is read.
3223 *
3224 * param base ENET peripheral base address.
3225 * param second The PTP 1588 system timer second.
3226 * param nanosecond The PTP 1588 system timer nanosecond.
3227  *        The unit of the nanosecond is 1 ns, so this is the actual nanosecond value.
3228 */
3229 void ENET_QOS_Ptp1588GetTimer(ENET_QOS_Type *base, uint64_t *second, uint32_t *nanosecond)
3230 {
3231 uint32_t primask;
3232
3233 /* Disables the interrupt. */
3234 primask = DisableGlobalIRQ();
3235
3236 ENET_QOS_Ptp1588GetTimerNoIRQDisable(base, second, nanosecond);
3237
3238 /* Enables the interrupt. */
3239 EnableGlobalIRQ(primask);
3240 }
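
/*
 * Illustrative usage sketch (not part of the driver): reading the current PTP time. It assumes
 * `base` points to an already initialized ENET QOS instance with the 1588 timer running; the
 * variable names are placeholders.
 *
 *   uint64_t seconds;
 *   uint32_t nanoseconds;
 *
 *   // Reads seconds/nanoseconds consistently across wrap-around; IRQs are masked during the read.
 *   ENET_QOS_Ptp1588GetTimer(base, &seconds, &nanoseconds);
 */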
3241
3242 /*!
3243  * brief Corrects the ENET PTP 1588 timer using the coarse method.
3244 *
3245 * param base ENET peripheral base address.
3246 * param operation The system time operation, refer to "enet_qos_systime_op"
3247 * param second The correction second.
3248 * param nanosecond The correction nanosecond.
3249 */
3250 status_t ENET_QOS_Ptp1588CorrectTimerInCoarse(ENET_QOS_Type *base,
3251 enet_qos_systime_op operation,
3252 uint32_t second,
3253 uint32_t nanosecond)
3254 {
3255 uint32_t corrSecond = second;
3256 uint32_t corrNanosecond;
3257 status_t result = kStatus_Success;
3258
3259 /* Set the system timer. */
3260 if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) != 0U)
3261 {
3262 if (operation == kENET_QOS_SystimeSubtract)
3263 {
3264 /* Set with the complement of the sub-second. */
3265 corrSecond = ENET_QOS_MAC_SYSTEM_TIME_SECONDS_UPDATE_TSS_MASK - (second - 1U);
3266 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_MASK |
3267 ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(ENET_QOS_NANOSECS_ONESECOND - nanosecond);
3268 }
3269 else
3270 {
3271 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(nanosecond);
3272 }
3273 }
3274 else
3275 {
3276 nanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS_MASK / ENET_QOS_NANOSECS_ONESECOND * nanosecond;
3277 if (operation == kENET_QOS_SystimeSubtract)
3278 {
3279 /* Set with the complement of the sub-second. */
3280 corrSecond = ENET_QOS_MAC_SYSTEM_TIME_SECONDS_UPDATE_TSS_MASK - (second - 1U);
3281 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_MASK |
3282 ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(
3283 ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS_MASK + 1U - nanosecond);
3284 }
3285 else
3286 {
3287 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(nanosecond);
3288 }
3289 }
3290
3291 base->MAC_SYSTEM_TIME_SECONDS_UPDATE = corrSecond;
3292 base->MAC_SYSTEM_TIME_NANOSECONDS_UPDATE = corrNanosecond;
3293
3294 /* Update the timer. */
3295 base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSUPDT_MASK;
3296
3297     /* Wait for the update to finish. */
3298 result = ENET_QOS_PollStatusFlag(&(base->MAC_TIMESTAMP_CONTROL), ENET_QOS_MAC_TIMESTAMP_CONTROL_TSUPDT_MASK, 0U);
3299
3300 return result;
3301 }
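
/*
 * Illustrative usage sketch (not part of the driver): stepping the 1588 timer back by
 * 1.5 seconds with the coarse method. `base` is assumed to be an initialized instance;
 * kENET_QOS_SystimeSubtract is the subtract operation handled by this function above.
 *
 *   status_t status = ENET_QOS_Ptp1588CorrectTimerInCoarse(base, kENET_QOS_SystimeSubtract,
 *                                                          1U, 500000000U);
 *   if (status != kStatus_Success)
 *   {
 *       // The TSUPDT poll timed out; handle the error.
 *   }
 */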
3302
3303 /*!
3304  * brief Corrects the ENET PTP 1588 timer using the fine method.
3305 *
3306 *
3307 * param base ENET peripheral base address.
3308  * param addend The addend value to be set in the fine method.
3309  * note Refer to the chapter "System time correction" and see the description of
3310  *      the "fine correction method".
3311 */
3312 status_t ENET_QOS_Ptp1588CorrectTimerInFine(ENET_QOS_Type *base, uint32_t addend)
3313 {
3314 status_t result = kStatus_Success;
3315
3316 base->MAC_TIMESTAMP_ADDEND = addend;
3317 base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSADDREG_MASK;
3318
3319 result = ENET_QOS_PollStatusFlag(&(base->MAC_TIMESTAMP_CONTROL), ENET_QOS_MAC_TIMESTAMP_CONTROL_TSADDREG_MASK, 0U);
3320
3321 return result;
3322 }
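
/*
 * Illustrative usage sketch (not part of the driver): trimming the clock rate with the fine
 * method. Scaling the addend by a measured drift in parts per billion is a common approach and
 * is only an assumption here, as is reading the current value back from MAC_TIMESTAMP_ADDEND;
 * the chapter "System time correction" in the reference manual is the authoritative source.
 *
 *   int32_t ppb = -200;                            // measured frequency adjustment in ppb
 *   uint32_t addend = base->MAC_TIMESTAMP_ADDEND;  // current addend value
 *   int64_t delta = ((int64_t)addend * ppb) / 1000000000;
 *
 *   status_t status = ENET_QOS_Ptp1588CorrectTimerInFine(base, (uint32_t)((int64_t)addend + delta));
 */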
3323
3324 /*!
3325  * brief Sets the ENET QOS PTP 1588 PPS target time registers.
3326 *
3327 * param base ENET QOS peripheral base address.
3328 * param instance The ENET QOS PTP PPS instance.
3329 * param seconds The target seconds.
3330 * param nanoseconds The target nanoseconds.
3331 */
3332 status_t ENET_QOS_Ptp1588PpsSetTrgtTime(ENET_QOS_Type *base,
3333 enet_qos_ptp_pps_instance_t instance,
3334 uint32_t seconds,
3335 uint32_t nanoseconds)
3336 {
3337 uint32_t *mac_pps_trgt_ns;
3338 uint32_t *mac_pps_trgt_s;
3339
3340 mac_pps_trgt_ns = (uint32_t *)((uintptr_t)&base->MAC_PPS0_TARGET_TIME_NANOSECONDS + 0x10U * (uint32_t)instance);
3341 mac_pps_trgt_s = (uint32_t *)((uintptr_t)&base->MAC_PPS0_TARGET_TIME_SECONDS + 0x10U * (uint32_t)instance);
3342
3343 if ((*mac_pps_trgt_ns & ENET_QOS_MAC_PPS0_TARGET_TIME_NANOSECONDS_TRGTBUSY0_MASK) != 0U)
3344 {
3345 return kStatus_ENET_QOS_TrgtBusy;
3346 }
3347
3348 *mac_pps_trgt_ns = ENET_QOS_MAC_PPS0_TARGET_TIME_NANOSECONDS_TTSL0(nanoseconds);
3349 *mac_pps_trgt_s = ENET_QOS_MAC_PPS0_TARGET_TIME_SECONDS_TSTRH0(seconds);
3350
3351 return kStatus_Success;
3352 }
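
/*
 * Illustrative usage sketch (not part of the driver): arming PPS instance 0 to fire one second
 * from now. `base` is assumed to be an initialized instance; the cast of 0 to
 * enet_qos_ptp_pps_instance_t simply selects the first PPS instance and stands in for the
 * proper enumerator from fsl_enet_qos.h.
 *
 *   uint64_t seconds;
 *   uint32_t nanoseconds;
 *   status_t status;
 *
 *   ENET_QOS_Ptp1588GetTimer(base, &seconds, &nanoseconds);
 *   status = ENET_QOS_Ptp1588PpsSetTrgtTime(base, (enet_qos_ptp_pps_instance_t)0,
 *                                           (uint32_t)seconds + 1U, 0U);
 *   if (status == kStatus_ENET_QOS_TrgtBusy)
 *   {
 *       // A previous target time is still pending; retry later.
 *   }
 */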
3353
3354 static status_t ENET_QOS_EstReadWriteWord(
3355 ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr, uint8_t read, uint8_t dbgm)
3356 {
3357 uint32_t ctrl;
3358 int retry = 10;
3359
3360 ctrl = ENET_QOS_MTL_EST_GCL_CONTROL_ADDR(addr) | ENET_QOS_MTL_EST_GCL_CONTROL_SRWO(1) |
3361 ENET_QOS_MTL_EST_GCL_CONTROL_DBGM(dbgm) | ENET_QOS_MTL_EST_GCL_CONTROL_GCRR(gcrr);
3362
3363 if (read != 0U)
3364 {
3365 ctrl |= ENET_QOS_MTL_EST_GCL_CONTROL_R1W0(1);
3366 }
3367 else
3368 {
3369 base->MTL_EST_GCL_DATA = *data;
3370 }
3371
3372 base->MTL_EST_GCL_CONTROL = ctrl;
3373
3374 while ((base->MTL_EST_GCL_CONTROL & ENET_QOS_MTL_EST_GCL_CONTROL_SRWO_MASK) != 0U)
3375 {
3376 if (retry-- < 0)
3377 {
3378 return kStatus_Timeout;
3379 }
3380 SDK_DelayAtLeastUs(1, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
3381 }
3382
3383 if (read != 0U)
3384 {
3385 *data = base->MTL_EST_GCL_DATA;
3386 }
3387
3388 if ((base->MTL_EST_GCL_CONTROL & ENET_QOS_MTL_EST_GCL_CONTROL_ERR0_MASK) != 0U)
3389 {
3390 return kStatus_ENET_QOS_Est_SwListWriteAbort;
3391 }
3392
3393 return kStatus_Success;
3394 }
3395
3396 static status_t ENET_QOS_EstProgramWord(ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr)
3397 {
3398 return ENET_QOS_EstReadWriteWord(base, addr, data, gcrr, 0, 0);
3399 }
3400
3401 static status_t ENET_QOS_EstReadWord(ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr, uint8_t dbgm)
3402 {
3403 return ENET_QOS_EstReadWriteWord(base, addr, data, gcrr, 1, dbgm);
3404 }
3405
3406 /*!
3407 * @brief Program Gate Control List.
3408 *
3409  * This function is used to program Enhanced Scheduled Transmission (IEEE 802.1Qbv).
3410  *
3411  * @param base ENET peripheral base address.
3412  * @param gcl Pointer to the Gate Control List structure.
3413  * @param ptpClk_Hz Frequency of the PTP clock.
3414 */
3415 status_t ENET_QOS_EstProgramGcl(ENET_QOS_Type *base, enet_qos_est_gcl_t *gcl, uint32_t ptpClk_Hz)
3416 {
3417 assert(gcl != NULL);
3418 uint32_t i, control, data;
3419 enet_qos_est_gate_op_t *gateOp;
3420 status_t rc;
3421
3422 #define EST_MAX_INTERVAL ((1UL << ENET_QOS_EST_WID) - 1U)
3423 #define EST_MAX_GATE ((1UL << (32U - ENET_QOS_EST_WID)) - 1U)
3424
3425 if (!gcl->enable)
3426 {
3427 goto exit;
3428 }
3429
3430 /* Sanity checks */
3431 if (gcl->numEntries > ENET_QOS_EST_DEP)
3432 {
3433 return kStatus_ENET_QOS_Est_InvalidParameter;
3434 }
3435
3436 if (gcl->opList == NULL)
3437 {
3438 return kStatus_ENET_QOS_Est_InvalidParameter;
3439 }
3440
3441 gateOp = gcl->opList;
3442
3443 for (i = 0; i < gcl->numEntries; i++)
3444 {
3445 if (gateOp->interval > EST_MAX_INTERVAL)
3446 {
3447 return kStatus_ENET_QOS_Est_InvalidParameter;
3448 }
3449 if (gateOp->gate > EST_MAX_GATE)
3450 {
3451 return kStatus_ENET_QOS_Est_InvalidParameter;
3452 }
3453 gateOp++;
3454 }
3455
3456 /* Check if sw list is busy */
3457 if ((base->MTL_EST_CONTROL & ENET_QOS_MTL_EST_CONTROL_SSWL_MASK) != 0U)
3458 {
3459 return kStatus_ENET_QOS_Est_SwListBusy;
3460 }
3461
3462 gateOp = gcl->opList;
3463
3464 for (i = 0; i < gcl->numEntries; i++)
3465 {
3466 data = gateOp->interval | (gateOp->gate << ENET_QOS_EST_WID);
3467 rc = ENET_QOS_EstProgramWord(base, i, &data, 0);
3468 if (rc != kStatus_Success)
3469 {
3470 return rc;
3471 }
3472
3473 gateOp++;
3474 }
3475
3476 /* BTR High */
3477 data = (uint32_t)(gcl->baseTime >> 32U);
3478 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_btr_high, &data, 1U);
3479 if (rc != kStatus_Success)
3480 {
3481 return rc;
3482 }
3483
3484 /* BTR Low */
3485 data = (uint32_t)gcl->baseTime;
3486 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_btr_low, &data, 1);
3487 if (rc != kStatus_Success)
3488 {
3489 return rc;
3490 }
3491
3492 /* CTR High */
3493 data = (uint32_t)(gcl->cycleTime >> 32U);
3494 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ctr_high, &data, 1);
3495 if (rc != kStatus_Success)
3496 {
3497 return rc;
3498 }
3499
3500 /* CTR Low */
3501 data = (uint32_t)gcl->cycleTime;
3502 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ctr_low, &data, 1);
3503 if (rc != kStatus_Success)
3504 {
3505 return rc;
3506 }
3507
3508 /* TER */
3509 data = gcl->extTime;
3510 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ter, &data, 1);
3511 if (rc != kStatus_Success)
3512 {
3513 return rc;
3514 }
3515
3516 /* LLR */
3517 data = gcl->numEntries;
3518 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_llr, &data, 1);
3519 if (rc != kStatus_Success)
3520 {
3521 return rc;
3522 }
3523
3524 exit:
3525 control = base->MTL_EST_CONTROL;
3526
3527 if (gcl->enable)
3528 {
3529 control &= ~ENET_QOS_MTL_EST_CONTROL_PTOV_MASK;
3530 control |= ENET_QOS_MTL_EST_CONTROL_SSWL_MASK | ENET_QOS_MTL_EST_CONTROL_EEST_MASK |
3531 ENET_QOS_MTL_EST_CONTROL_PTOV((1000000000U / ptpClk_Hz) * 6U);
3532 }
3533 else
3534 {
3535 control &= ~ENET_QOS_MTL_EST_CONTROL_EEST_MASK;
3536 }
3537
3538 base->MTL_EST_CONTROL = control;
3539
3540 return kStatus_Success;
3541 }
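
/*
 * Illustrative usage sketch (not part of the driver): programming a minimal two-entry gate
 * control list. The field names match those used by ENET_QOS_EstProgramGcl() above; the gate
 * masks, intervals, times and the 50 MHz PTP clock are example values only, and their encoding
 * should be checked against the EST chapter of the reference manual.
 *
 *   enet_qos_est_gate_op_t gateOps[2] = {
 *       {.gate = 0x01U, .interval = 400000U},   // first entry: gate mask 0x01
 *       {.gate = 0x02U, .interval = 600000U},   // second entry: gate mask 0x02
 *   };
 *   enet_qos_est_gcl_t gcl = {
 *       .enable     = true,
 *       .opList     = gateOps,
 *       .numEntries = 2U,
 *       .baseTime   = 0U,
 *       .cycleTime  = 1000000U,
 *       .extTime    = 0U,
 *   };
 *
 *   status_t status = ENET_QOS_EstProgramGcl(base, &gcl, 50000000U);
 */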
3542
3543 /*!
3544 * @brief Read Gate Control List.
3545 *
3546  * This function is used to read the Enhanced Scheduled Transmission (IEEE 802.1Qbv) gate control list.
3547  *
3548  * @param base ENET peripheral base address.
3549  * @param gcl Pointer to the Gate Control List structure.
3550  * @param listLen Length of the opList array provided in the gcl structure.
3551  * @param hwList If true, read the hardware list; if false, read the software list.
3552 */
3553 status_t ENET_QOS_EstReadGcl(ENET_QOS_Type *base, enet_qos_est_gcl_t *gcl, uint32_t listLen, bool hwList)
3554 {
3555 assert(gcl != NULL);
3556 assert(gcl->opList != NULL);
3557 uint8_t dbgm = 0;
3558 uint32_t data, i;
3559 enet_qos_est_gate_op_t *gateOp;
3560 status_t rc;
3561
3562 if (hwList == true)
3563 {
3564 dbgm = 1;
3565 }
3566
3567 /* LLR */
3568 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_llr, &data, 1, dbgm);
3569 if (rc != kStatus_Success)
3570 {
3571 return rc;
3572 }
3573
3574 gcl->numEntries = data;
3575
3576 if (gcl->numEntries > listLen)
3577 {
3578 return kStatus_ENET_QOS_Est_InvalidParameter;
3579 }
3580
3581 /* BTR High */
3582 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_btr_high, &data, 1, dbgm);
3583 if (rc != kStatus_Success)
3584 {
3585 return rc;
3586 }
3587
3588 gcl->baseTime = (uint64_t)data << 32U;
3589
3590 /* BTR Low */
3591 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_btr_low, &data, 1, dbgm);
3592 if (rc != kStatus_Success)
3593 {
3594 return rc;
3595 }
3596
3597 gcl->baseTime |= data;
3598
3599 /* CTR High */
3600 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ctr_high, &data, 1, dbgm);
3601 if (rc != kStatus_Success)
3602 {
3603 return rc;
3604 }
3605
3606 gcl->cycleTime = (uint64_t)data << 32U;
3607
3608 /* CTR Low */
3609 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ctr_low, &data, 1, dbgm);
3610 if (rc != kStatus_Success)
3611 {
3612 return rc;
3613 }
3614
3615 gcl->cycleTime |= data;
3616
3617 /* TER */
3618 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ter, &data, 1, dbgm);
3619 if (rc != kStatus_Success)
3620 {
3621 return rc;
3622 }
3623
3624 gcl->extTime = data;
3625
3626 gateOp = gcl->opList;
3627
3628 for (i = 0; i < gcl->numEntries; i++)
3629 {
3630 rc = ENET_QOS_EstReadWord(base, i, &data, 0, dbgm);
3631 if (rc != kStatus_Success)
3632 {
3633 return rc;
3634 }
3635
3636 gateOp->interval = data & (EST_MAX_INTERVAL);
3637 gateOp->gate = data >> ENET_QOS_EST_WID;
3638 gateOp++;
3639 }
3640
3641 return kStatus_Success;
3642 }
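
/*
 * Illustrative usage sketch (not part of the driver): reading back the hardware-owned gate
 * control list into an application buffer. `base` and the buffer names are placeholders.
 *
 *   enet_qos_est_gate_op_t readOps[ENET_QOS_EST_DEP];
 *   enet_qos_est_gcl_t readGcl = {.opList = readOps};
 *
 *   // hwList = true selects the hardware list, false selects the pending software list.
 *   status_t status = ENET_QOS_EstReadGcl(base, &readGcl, ENET_QOS_EST_DEP, true);
 */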
3643
3644 /*!
3645 * brief Read flexible rx parser configuration at specified index.
3646 *
3647  * This function is used to read the flexible rx parser configuration at the specified index.
3648  *
3649  * param base ENET peripheral base address.
3650  * param rxpConfig The rx parser configuration pointer.
3651  * param entryIndex The rx parser entry index to read, starting from 0.
3652  * retval kStatus_Success Read the rx parser configuration successfully.
3653 * retval kStatus_ENET_QOS_Timeout Poll status flag timeout.
3654 */
3655 status_t ENET_QOS_ReadRxParser(ENET_QOS_Type *base, enet_qos_rxp_config_t *rxpConfig, uint16_t entryIndex)
3656 {
3657 assert(rxpConfig != NULL);
3658 assert(entryIndex < ENET_QOS_RXP_ENTRY_COUNT);
3659
3660 uint32_t *dataPtr;
3661 uint8_t entrySize = sizeof(enet_qos_rxp_config_t) / sizeof(uint32_t);
3662 uint32_t value = 0U;
3663 status_t result = kStatus_Success;
3664
3665     /* Wait until the hardware is not busy. */
3666 result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS),
3667 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3668 if (kStatus_Success != result)
3669 {
3670 return result;
3671 }
3672
3673 for (uint8_t i = 0; i < entrySize; i++)
3674 {
3675 /* Read address. */
3676 value = ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_ADDR((uint32_t)entrySize * entryIndex + i);
3677
3678 /* Issue read command. */
3679 value &= ~ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_WRRDN_MASK;
3680 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3681
3682 /* Start Read */
3683 value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK;
3684 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3685
3686         /* Wait until the hardware is not busy. */
3687 result = ENET_QOS_PollStatusFlag(&base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS,
3688 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3689 if (kStatus_Success != result)
3690 {
3691 return result;
3692 }
3693
3694 dataPtr = (uint32_t *)(void *)&rxpConfig[entryIndex];
3695 dataPtr = &dataPtr[i];
3696 /* Read data */
3697 *dataPtr = base->MTL_RXP_INDIRECT_ACC_DATA;
3698 }
3699
3700 return result;
3701 }
3702
3703 /*!
3704 * brief Configure flexible rx parser.
3705 *
3706 * This function is used to configure the flexible rx parser table.
3707 *
3708  * param base ENET peripheral base address.
3709 * param rxpConfig The rx parser configuration pointer.
3710 * param entryCount The rx parser entry count.
3711 * retval kStatus_Success Configure rx parser success.
3712 * retval kStatus_ENET_QOS_Timeout Poll status flag timeout.
3713 */
3714 status_t ENET_QOS_ConfigureRxParser(ENET_QOS_Type *base, enet_qos_rxp_config_t *rxpConfig, uint16_t entryCount)
3715 {
3716 assert(rxpConfig != NULL);
3717 assert(entryCount <= ENET_QOS_RXP_ENTRY_COUNT);
3718
3719 uint32_t *dataPtr;
3720 uint32_t entrySize = sizeof(enet_qos_rxp_config_t) / sizeof(uint32_t);
3721 uint32_t value = 0U;
3722 status_t result = kStatus_Success;
3723 bool enableRx = false;
3724
3725 /* Disable the MAC rx. */
3726 if (0U != (base->MAC_CONFIGURATION & ENET_QOS_MAC_CONFIGURATION_RE_MASK))
3727 {
3728 base->MAC_CONFIGURATION &= ~ENET_QOS_MAC_CONFIGURATION_RE_MASK;
3729 enableRx = true;
3730 }
3731
3732 /* Disable frame parser. */
3733 result = ENET_QOS_EnableRxParser(base, false);
3734
3735 if (kStatus_Success != result)
3736 {
3737 return result;
3738 }
3739
3740 for (uint8_t count = 0; count < entryCount; count++)
3741 {
3742 for (uint8_t i = 0; i < entrySize; i++)
3743 {
3744             /* Wait until the hardware is not busy. */
3745 result = ENET_QOS_PollStatusFlag(&base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS,
3746 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3747 if (kStatus_Success != result)
3748 {
3749 return result;
3750 }
3751
3752 dataPtr = (uint32_t *)(void *)&rxpConfig[count];
3753 dataPtr = &dataPtr[i];
3754
3755             /* Write data before issuing the write command. */
3756 base->MTL_RXP_INDIRECT_ACC_DATA = *dataPtr;
3757
3758 /* Write address and issue write command */
3759 value = ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_ADDR(entrySize * count + i);
3760 // base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3761
3762 value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_WRRDN_MASK;
3763 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3764
3765 /* Start write */
3766 value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK;
3767 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3768 }
3769 }
3770
3771     /* Wait until the hardware is not busy. */
3772 result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS),
3773 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3774 if (kStatus_Success != result)
3775 {
3776 return result;
3777 }
3778
3779 /* Program NVE and NPE. */
3780 value = base->MTL_RXP_CONTROL_STATUS;
3781 value &= ~(ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE_MASK | ENET_QOS_MTL_RXP_CONTROL_STATUS_NPE_MASK);
3782
3783 value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NPE((uint32_t)entryCount - 1U);
3784 if (entryCount < 3U)
3785 {
3786 value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE(2U);
3787 }
3788 else
3789 {
3790 value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE((uint32_t)entryCount - 1U);
3791 }
3792
3793 base->MTL_RXP_CONTROL_STATUS = value;
3794
3795 /* Enable frame parser. */
3796 result = ENET_QOS_EnableRxParser(base, true);
3797
3798 /* Enable Receive */
3799 if (enableRx)
3800 {
3801 base->MAC_CONFIGURATION |= ENET_QOS_MAC_CONFIGURATION_RE_MASK;
3802 }
3803
3804 return result;
3805 }
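
/*
 * Illustrative usage sketch (not part of the driver): writing a small rx parser table and
 * reading one entry back. The entries below are zero-initialized placeholders; a real
 * application fills in the enet_qos_rxp_config_t match/accept fields declared in
 * fsl_enet_qos.h before calling ENET_QOS_ConfigureRxParser().
 *
 *   enet_qos_rxp_config_t rxpEntries[2] = {0};
 *   enet_qos_rxp_config_t readBack;
 *   status_t status;
 *
 *   status = ENET_QOS_ConfigureRxParser(base, rxpEntries, 2U);
 *   if (status == kStatus_Success)
 *   {
 *       status = ENET_QOS_ReadRxParser(base, &readBack, 0U);
 *   }
 */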
3806
3807 /*!
3808 * brief Gets statistical data in transfer.
3809 *
3810 * param base ENET_QOS peripheral base address.
3811 * param statistics The statistics structure pointer.
3812 */
3813 void ENET_QOS_GetStatistics(ENET_QOS_Type *base, enet_qos_transfer_stats_t *statistics)
3814 {
3815 /* Rx statistics */
3816 statistics->statsRxFrameCount = base->MAC_RX_PACKETS_COUNT_GOOD_BAD;
3817 statistics->statsRxCrcErr = base->MAC_RX_CRC_ERROR_PACKETS;
3818 statistics->statsRxAlignErr = base->MAC_RX_ALIGNMENT_ERROR_PACKETS;
3819 statistics->statsRxLengthErr = base->MAC_RX_LENGTH_ERROR_PACKETS;
3820 statistics->statsRxFifoOverflowErr = base->MAC_RX_FIFO_OVERFLOW_PACKETS;
3821
3822 /* Tx statistics */
3823 statistics->statsTxFrameCount = base->MAC_TX_PACKET_COUNT_GOOD_BAD;
3824 statistics->statsTxFifoUnderRunErr = base->MAC_TX_UNDERFLOW_ERROR_PACKETS;
3825 }
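
/*
 * Illustrative usage sketch (not part of the driver): polling the MMC counters. The field names
 * are the ones filled in by ENET_QOS_GetStatistics() above; `base` is assumed to be an
 * initialized instance.
 *
 *   enet_qos_transfer_stats_t stats;
 *
 *   ENET_QOS_GetStatistics(base, &stats);
 *   if (stats.statsRxCrcErr != 0U)
 *   {
 *       // CRC errors were counted on receive.
 *   }
 */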
3826
3827 /*!
3828 * brief The ENET IRQ handler.
3829 *
3830 * param base ENET peripheral base address.
3831 * param handle The ENET handler pointer.
3832 */
3833 void ENET_QOS_CommonIRQHandler(ENET_QOS_Type *base, enet_qos_handle_t *handle)
3834 {
3835 /* Check for the interrupt source type. */
3836 /* DMA CHANNEL 0. */
3837 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC0IS_MASK) != 0U)
3838 {
3839 uint32_t flag = base->DMA_CH[0].DMA_CHX_STAT;
3840 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3841 {
3842 base->DMA_CH[0].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3843 if (handle->callback != NULL)
3844 {
3845 handle->callback(base, handle, kENET_QOS_RxIntEvent, 0, handle->userData);
3846 }
3847 }
3848 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3849 {
3850 base->DMA_CH[0].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3851 ENET_QOS_ReclaimTxDescriptor(base, handle, 0);
3852 }
3853 }
3854
3855 /* DMA CHANNEL 1. */
3856 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC1IS_MASK) != 0U)
3857 {
3858 uint32_t flag = base->DMA_CH[1].DMA_CHX_STAT;
3859 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3860 {
3861 base->DMA_CH[1].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3862 if (handle->callback != NULL)
3863 {
3864 handle->callback(base, handle, kENET_QOS_RxIntEvent, 1, handle->userData);
3865 }
3866 }
3867 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3868 {
3869 base->DMA_CH[1].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3870 ENET_QOS_ReclaimTxDescriptor(base, handle, 1);
3871 }
3872 }
3873
3874 /* DMA CHANNEL 2. */
3875 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC2IS_MASK) != 0U)
3876 {
3877 uint32_t flag = base->DMA_CH[2].DMA_CHX_STAT;
3878 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3879 {
3880 base->DMA_CH[2].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3881 if (handle->callback != NULL)
3882 {
3883 handle->callback(base, handle, kENET_QOS_RxIntEvent, 2, handle->userData);
3884 }
3885 }
3886 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3887 {
3888 base->DMA_CH[2].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3889 ENET_QOS_ReclaimTxDescriptor(base, handle, 2);
3890 }
3891 }
3892
3893 /* DMA CHANNEL 3. */
3894 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC3IS_MASK) != 0U)
3895 {
3896 uint32_t flag = base->DMA_CH[3].DMA_CHX_STAT;
3897 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3898 {
3899 base->DMA_CH[3].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3900 if (handle->callback != NULL)
3901 {
3902 handle->callback(base, handle, kENET_QOS_RxIntEvent, 3, handle->userData);
3903 }
3904 }
3905 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3906 {
3907 base->DMA_CH[3].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3908 ENET_QOS_ReclaimTxDescriptor(base, handle, 3);
3909 }
3910 }
3911
3912 /* MAC TIMESTAMP. */
3913 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_MACIS_MASK) != 0U)
3914 {
3915 if ((base->MAC_INTERRUPT_STATUS & ENET_QOS_MAC_INTERRUPT_STATUS_TSIS_MASK) != 0U)
3916 {
3917 if (handle->callback != NULL)
3918 {
3919 handle->callback(base, handle, kENET_QOS_TimeStampIntEvent, 0, handle->userData);
3920 }
3921 }
3922 }
3923 SDK_ISR_EXIT_BARRIER;
3924 }
3925
3926 #if defined(ENET_QOS)
3927 void ENET_QOS_DriverIRQHandler(void);
3928 void ENET_QOS_DriverIRQHandler(void)
3929 {
3930 s_enetqosIsr(ENET_QOS, s_ENETHandle[0]);
3931 }
3932 #endif
3933
3934 #if defined(CONNECTIVITY__ENET_QOS)
3935 void CONNECTIVITY_EQOS_INT_DriverIRQHandler(void);
3936 void CONNECTIVITY_EQOS_INT_DriverIRQHandler(void)
3937 {
3938 s_enetqosIsr(CONNECTIVITY__ENET_QOS, s_ENETHandle[0]);
3939 }
3940 #endif
3941