1 /*
2 * Copyright 2019-2021 NXP
3 * All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8 #include "fsl_enet_qos.h"
9 /*******************************************************************************
10 * Definitions
11 ******************************************************************************/
12
13 /* Component ID definition, used by tools. */
14 #ifndef FSL_COMPONENT_ID
15 #define FSL_COMPONENT_ID "platform.drivers.enet_qos"
16 #endif
17
18 /*! @brief Defines the number of nanoseconds in one second (10^9). */
19 #define ENET_QOS_NANOSECS_ONESECOND (1000000000U)
20 /*! @brief Defines the number of microseconds in one second (10^6). */
21 #define ENET_QOS_MICRSECS_ONESECOND (1000000U)
22
23 /*! @brief Rx buffer LSB ignore bits. */
24 #define ENET_QOS_RXBUFF_IGNORELSB_BITS (3U)
25 /*! @brief ENET FIFO size unit. */
26 #define ENET_QOS_FIFOSIZE_UNIT (256U)
27 /*! @brief ENET half-duplex default IPG. */
28 #define ENET_QOS_HALFDUPLEX_DEFAULTIPG (4U)
29 /*! @brief ENET minimum ring length. */
30 #define ENET_QOS_MIN_RINGLEN (4U)
31 /*! @brief ENET wakeup filter numbers. */
32 #define ENET_QOS_WAKEUPFILTER_NUM (8U)
33 /*! @brief Required system time timer frequency. */
34 #define ENET_QOS_SYSTIME_REQUIRED_CLK_MHZ (50U)
35 /*! @brief Ethernet VLAN tag length. */
36 #define ENET_QOS_FRAME_VLAN_TAGLEN 4U
37
38 /*! @brief AVB TYPE */
39 #define ENET_QOS_AVBTYPE 0x22F0U
40 #define ENET_QOS_HEAD_TYPE_OFFSET (12)
41 #define ENET_QOS_HEAD_AVBTYPE_OFFSET (16)
42
43 /*! @brief Defines the macros for converting constants between host byte order and network byte order. */
44 #define ENET_QOS_HTONS(n) __REV16(n)
45 #define ENET_QOS_HTONL(n) __REV(n)
46 #define ENET_QOS_NTOHS(n) __REV16(n)
47 #define ENET_QOS_NTOHL(n) __REV(n)
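/* A minimal sketch of how the byte-order macros above are used; they wrap the
 * CMSIS __REV16()/__REV() intrinsics, so on a little-endian core a 16-bit
 * EtherType constant is byte-swapped before being placed in a frame header:
 * code
 *  uint16_t etherType = (uint16_t)ENET_QOS_HTONS(0x22F0U); // 0x22F0 -> 0xF022
 * endcode
 */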
48
49 #define ENET_QOS_DMA_CHX_RX_CTRL_RBSZ
50 /*******************************************************************************
51 * Prototypes
52 ******************************************************************************/
53
54 /*!
55 * @brief Increase the index in the ring.
56 *
57 * @param index The current index.
58 * @param max The size.
59 * @return the increased index.
60 */
61 static uint16_t ENET_QOS_IncreaseIndex(uint16_t index, uint16_t max);
62
63 /*!
64 * @brief Poll status flag.
65 *
66 * @param regAddr The register address to read the status from.
67 * @param mask The mask applied to the register value.
68 * @param readyStatus The expected value of the masked field.
69 * @retval kStatus_Success Poll readyStatus Success.
70 * @retval kStatus_ENET_QOS_Timeout Poll readyStatus timeout.
71 */
72 static status_t ENET_QOS_PollStatusFlag(volatile uint32_t *regAddr, uint32_t mask, uint32_t readyStatus);
73
74 /*!
75 * @brief Set ENET DMA controller with the configuration.
76 *
77 * @param base ENET peripheral base address.
78 * @param config ENET Mac configuration.
79 */
80 static void ENET_QOS_SetDMAControl(ENET_QOS_Type *base, const enet_qos_config_t *config);
81
82 /*!
83 * @brief Set ENET MAC controller with the configuration.
84 *
85 * @param base ENET peripheral base address.
86 * @param config ENET Mac configuration.
87 * @param macAddr ENET six-byte mac address.
88 */
89 static void ENET_QOS_SetMacControl(ENET_QOS_Type *base,
90 const enet_qos_config_t *config,
91 uint8_t *macAddr,
92 uint8_t macCount);
93 /*!
94 * @brief Set ENET MTL with the configuration.
95 *
96 * @param base ENET peripheral base address.
97 * @param config ENET Mac configuration.
98 */
99 static void ENET_QOS_SetMTL(ENET_QOS_Type *base, const enet_qos_config_t *config);
100
101 /*!
102 * @brief Set ENET DMA transmit buffer descriptors for one channel.
103 *
104 * @param base ENET peripheral base address.
105 * @param bufferConfig ENET buffer configuration.
106 * @param intTxEnable tx interrupt enable.
107 * @param channel The channel number, 0 or 1.
108 */
109 static status_t ENET_QOS_TxDescriptorsInit(ENET_QOS_Type *base,
110 const enet_qos_buffer_config_t *bufferConfig,
111 bool intTxEnable,
112 uint8_t channel);
113
114 /*!
115 * @brief Set ENET DMA receive buffer descriptors for one channel.
116 *
117 * @param base ENET peripheral base address.
118 * @param bufferConfig ENET buffer configuration.
119 * @param intRxEnable rx interrupt enable.
120 * @param channel The channel number, 0 or 1.
121 */
122 static status_t ENET_QOS_RxDescriptorsInit(ENET_QOS_Type *base,
123 enet_qos_config_t *config,
124 const enet_qos_buffer_config_t *bufferConfig,
125 bool intRxEnable,
126 uint8_t channel);
127
128 /*!
129 * @brief Sets the ENET 1588 feature.
130 *
131 * Enable the enhancement 1588 buffer descriptor mode and start
132 * the 1588 timer.
133 *
134 * @param base ENET peripheral base address.
135 * @param config The ENET configuration.
136 * @param refClk_Hz The reference clock for ptp 1588.
137 */
138 static status_t ENET_QOS_SetPtp1588(ENET_QOS_Type *base, const enet_qos_config_t *config, uint32_t refClk_Hz);
139
140 /*!
141 * @brief Store the receive time-stamp for event PTP frame in the time-stamp buffer ring.
142 *
143 * @param base ENET peripheral base address.
144 * @param handle ENET handler.
145 * @param rxDesc The ENET receive descriptor pointer.
147 * @param ts The timestamp structure pointer.
148 */
149 static void ENET_QOS_StoreRxFrameTime(ENET_QOS_Type *base,
150 enet_qos_handle_t *handle,
151 enet_qos_rx_bd_struct_t *rxDesc,
153 enet_qos_ptp_time_t *ts);
154
155 /*!
156 * @brief Check if txDirtyRing available.
157 *
158 * @param txDirtyRing pointer to txDirtyRing
159 * @return true if the dirty Tx ring has room, false if it is full.
160 */
161 static inline bool ENET_QOS_TxDirtyRingAvailable(enet_qos_tx_dirty_ring_t *txDirtyRing);
162
163 /*******************************************************************************
164 * Variables
165 ******************************************************************************/
166 /*! @brief Pointers to enet bases for each instance. */
167 static ENET_QOS_Type *const s_enetqosBases[] = ENET_QOS_BASE_PTRS;
168
169 /*! @brief Pointers to enet IRQ number for each instance. */
170 static const IRQn_Type s_enetqosIrqId[] = ENET_QOS_IRQS;
171
172 /* ENET ISR for transactional APIs. */
173 static enet_qos_isr_t s_enetqosIsr;
174
175 /*! @brief Pointers to enet handles for each instance. */
176 static enet_qos_handle_t *s_ENETHandle[ARRAY_SIZE(s_enetqosBases)] = {NULL};
177
178 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
179 /*! @brief Pointers to enet clocks for each instance. */
180 const clock_ip_name_t s_enetqosClock[ARRAY_SIZE(s_enetqosBases)] = ENETQOS_CLOCKS;
181 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
182
183 /*******************************************************************************
184 * Code
185 ******************************************************************************/
186
187 static status_t ENET_QOS_PollStatusFlag(volatile uint32_t *regAddr, uint32_t mask, uint32_t readyStatus)
188 {
189 uint8_t retryTimes = 10U;
190 status_t result = kStatus_Success;
191
192 while ((readyStatus != (*regAddr & mask)) && (0U != retryTimes))
193 {
194 retryTimes--;
195 SDK_DelayAtLeastUs(1U, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
196 }
197
198 if (retryTimes == 0U)
199 {
200 result = kStatus_ENET_QOS_Timeout;
201 }
202
203 return result;
204 }
205
206 /*!
207 * brief Sets the ENET AVB feature.
208 *
209 * ENET_QOS AVB feature configuration, set transmit bandwidth.
210 * This API is called when the AVB feature is required.
211 *
212 * param base ENET_QOS peripheral base address.
213 * param config The ENET_QOS AVB feature configuration structure.
214 * param queueIndex ENET_QOS queue index.
215 */
216 void ENET_QOS_AVBConfigure(ENET_QOS_Type *base, const enet_qos_cbs_config_t *config, uint8_t queueIndex)
217 {
218 assert(config != NULL);
219
220 /* Enable AV algorithm */
221 base->MTL_QUEUE[queueIndex].MTL_TXQX_ETS_CTRL |= ENET_QOS_MTL_TXQX_ETS_CTRL_AVALG_MASK;
222 /* Configure send slope */
223 base->MTL_QUEUE[queueIndex].MTL_TXQX_SNDSLP_CRDT = config->sendSlope;
224 /* Configure idle slope (same register as tx weight) */
225 base->MTL_QUEUE[queueIndex].MTL_TXQX_QNTM_WGHT = config->idleSlope;
226 /* Configure high credit */
227 base->MTL_QUEUE[queueIndex].MTL_TXQX_HI_CRDT = config->highCredit;
228     /* Configure low credit */
229 base->MTL_QUEUE[queueIndex].MTL_TXQX_LO_CRDT = config->lowCredit;
230 }
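/* A minimal usage sketch for the CBS setup above. The field names are the ones
 * consumed by ENET_QOS_AVBConfigure(); the credit/slope values and the
 * ENET_QOS_0 base symbol are illustrative assumptions, not a tuned SR class
 * configuration:
 * code
 *  enet_qos_cbs_config_t cbsConfig = {
 *      .sendSlope  = 0x1000U,
 *      .idleSlope  = 0x800U,
 *      .highCredit = 0x1000U,
 *      .lowCredit  = 0x3000U,
 *  };
 *  ENET_QOS_AVBConfigure(ENET_QOS_0, &cbsConfig, 1U);
 * endcode
 */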
231
232 static uint16_t ENET_QOS_IncreaseIndex(uint16_t index, uint16_t max)
233 {
234 /* Increase the index. */
235 index++;
236 if (index >= max)
237 {
238 index = 0;
239 }
240 return index;
241 }
242
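/*!
 * @brief Reverses the bit order of a 32-bit value (bit 0 becomes bit 31, and so on).
 *
 * Used by the multicast group APIs below to post-process the CRC-32 of a
 * multicast address before taking the upper 6 bits as the hash table index.
 *
 * @param value The value to bit-reverse.
 * @return The bit-reversed value.
 */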
243 static uint32_t ENET_QOS_ReverseBits(uint32_t value)
244 {
245 value = ((value & 0x55555555UL) << 1U) | ((value >> 1U) & 0x55555555UL);
246 value = ((value & 0x33333333UL) << 2U) | ((value >> 2U) & 0x33333333UL);
247 value = ((value & 0x0F0F0F0FUL) << 4U) | ((value >> 4U) & 0x0F0F0F0FUL);
248
249 return (value >> 24U) | ((value >> 8U) & 0xFF00UL) | ((value & 0xFF00UL) << 8U) | (value << 24U);
250 }
251
252 static void ENET_QOS_SetDMAControl(ENET_QOS_Type *base, const enet_qos_config_t *config)
253 {
254 assert(config != NULL);
255
256 uint8_t index;
257 uint32_t reg;
258 uint32_t burstLen;
259
260     /* Reset the DMA first and wait for the reset to complete.
261      * The reset bit is cleared automatically once the reset is done. */
262 base->DMA_MODE |= ENET_QOS_DMA_MODE_SWR_MASK;
263 while ((base->DMA_MODE & ENET_QOS_DMA_MODE_SWR_MASK) != 0U)
264 {
265 }
266
267 /* Set the burst length. */
268 for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
269 {
270 burstLen = (uint32_t)kENET_QOS_BurstLen1;
271 if (config->multiqueueCfg != NULL)
272 {
273 burstLen = (uint32_t)config->multiqueueCfg->burstLen;
274 }
275 base->DMA_CH[index].DMA_CHX_CTRL = burstLen & ENET_QOS_DMA_CHX_CTRL_PBLx8_MASK;
276
277 reg = base->DMA_CH[index].DMA_CHX_TX_CTRL & ~ENET_QOS_DMA_CHX_TX_CTRL_TxPBL_MASK;
278 base->DMA_CH[index].DMA_CHX_TX_CTRL = reg | ENET_QOS_DMA_CHX_TX_CTRL_TxPBL(burstLen & 0x3FU);
279
280 reg = base->DMA_CH[index].DMA_CHX_RX_CTRL & ~ENET_QOS_DMA_CHX_RX_CTRL_RxPBL_MASK;
281 base->DMA_CH[index].DMA_CHX_RX_CTRL = reg | ENET_QOS_DMA_CHX_RX_CTRL_RxPBL(burstLen & 0x3FU);
282 }
283 }
284
285 static void ENET_QOS_SetMTL(ENET_QOS_Type *base, const enet_qos_config_t *config)
286 {
287 assert(config != NULL);
288
289 uint32_t txqOpreg = 0;
290 uint32_t rxqOpReg = 0;
291 enet_qos_multiqueue_config_t *multiqCfg = config->multiqueueCfg;
292 uint8_t index;
293
294 /* Set transmit operation mode. */
295 if ((config->specialControl & (uint32_t)kENET_QOS_StoreAndForward) != 0U)
296 {
297 txqOpreg = ENET_QOS_MTL_TXQX_OP_MODE_TSF_MASK;
298 rxqOpReg = ENET_QOS_MTL_RXQX_OP_MODE_RSF_MASK;
299 }
300     /* Flush the transmit queue. */
301 txqOpreg |= ENET_QOS_MTL_TXQX_OP_MODE_FTQ_MASK;
302 /* Set receive operation mode. */
303 rxqOpReg |= ENET_QOS_MTL_RXQX_OP_MODE_FUP_MASK | ENET_QOS_MTL_RXQX_OP_MODE_RFD(3U) |
304 ENET_QOS_MTL_RXQX_OP_MODE_RFA(1U) | ENET_QOS_MTL_RXQX_OP_MODE_EHFC_MASK;
305
306 if (multiqCfg == NULL)
307 {
308 txqOpreg |=
309 ENET_QOS_MTL_TXQX_OP_MODE_TQS(((uint32_t)ENET_QOS_MTL_TXFIFOSIZE / (uint32_t)ENET_QOS_FIFOSIZE_UNIT - 1U));
310 rxqOpReg |=
311 ENET_QOS_MTL_RXQX_OP_MODE_RQS(((uint32_t)ENET_QOS_MTL_RXFIFOSIZE / (uint32_t)ENET_QOS_FIFOSIZE_UNIT - 1U));
312 base->MTL_QUEUE[0].MTL_TXQX_OP_MODE = txqOpreg | ENET_QOS_MTL_TXQX_OP_MODE_TXQEN((uint32_t)kENET_QOS_DCB_Mode);
313 base->MTL_QUEUE[0].MTL_RXQX_OP_MODE = rxqOpReg;
314 }
315 else
316 {
317 /* Set the schedule/arbitration(set for multiple queues). */
318 base->MTL_OPERATION_MODE = ENET_QOS_MTL_OPERATION_MODE_SCHALG(multiqCfg->mtltxSche) |
319 ENET_QOS_MTL_OPERATION_MODE_RAA(multiqCfg->mtlrxSche);
320
321 for (index = 0; index < multiqCfg->txQueueUse; index++)
322 {
323 txqOpreg |= ENET_QOS_MTL_TXQX_OP_MODE_TQS(
324 ((uint32_t)ENET_QOS_MTL_TXFIFOSIZE / ((uint32_t)multiqCfg->txQueueUse * ENET_QOS_FIFOSIZE_UNIT)) - 1U);
325 base->MTL_QUEUE[index].MTL_TXQX_OP_MODE =
326 txqOpreg | ENET_QOS_MTL_TXQX_OP_MODE_TXQEN((uint32_t)multiqCfg->txQueueConfig[index].mode);
327 if (multiqCfg->txQueueConfig[index].mode == kENET_QOS_AVB_Mode)
328 {
329 ENET_QOS_AVBConfigure(base, multiqCfg->txQueueConfig[index].cbsConfig, index);
330 }
331 else
332 {
333 base->MTL_QUEUE[index].MTL_TXQX_QNTM_WGHT = multiqCfg->txQueueConfig[index].weight;
334 }
335 }
336
337 volatile uint32_t *mtlrxQuemapReg;
338 uint8_t configIndex;
339 for (index = 0; index < multiqCfg->rxQueueUse; index++)
340 {
341 rxqOpReg |= ENET_QOS_MTL_RXQX_OP_MODE_RQS(
342 ((uint32_t)ENET_QOS_MTL_RXFIFOSIZE / ((uint32_t)multiqCfg->rxQueueUse * ENET_QOS_FIFOSIZE_UNIT)) - 1U);
343 base->MTL_QUEUE[index].MTL_RXQX_OP_MODE = rxqOpReg;
344 mtlrxQuemapReg = (index < 4U) ? &base->MTL_RXQ_DMA_MAP0 : &base->MTL_RXQ_DMA_MAP1;
345 configIndex = (index & 0x3U);
346 *mtlrxQuemapReg &= ~((uint32_t)ENET_QOS_MTL_RXQ_DMA_MAP0_Q0MDMACH_MASK << (8U * configIndex));
347 *mtlrxQuemapReg |= (uint32_t)ENET_QOS_MTL_RXQ_DMA_MAP0_Q0MDMACH(multiqCfg->rxQueueConfig[index].mapChannel)
348 << (8U * configIndex);
349 }
350 }
351 }
352
353 static void ENET_QOS_SetMacControl(ENET_QOS_Type *base,
354 const enet_qos_config_t *config,
355 uint8_t *macAddr,
356 uint8_t macCount)
357 {
358 assert(config != NULL);
359
360 uint32_t reg = 0;
361
362     /* Set the MAC address. */
363     /* DMA channel 0 is the channel to which received packets whose DA
364      * matches the MAC address content are routed. */
365 if (macAddr != NULL)
366 {
367 for (uint8_t i = 0; i < macCount; i++)
368 {
369 ENET_QOS_SetMacAddr(base, macAddr, i);
370 }
371 }
372
373 /* Set the receive filter. */
374 reg =
375 ENET_QOS_MAC_PACKET_FILTER_PR(((config->specialControl & (uint32_t)kENET_QOS_PromiscuousEnable) != 0U) ? 1U :
376 0U) |
377 ENET_QOS_MAC_PACKET_FILTER_DBF(((config->specialControl & (uint32_t)kENET_QOS_BroadCastRxDisable) != 0U) ? 1U :
378 0U) |
379 ENET_QOS_MAC_PACKET_FILTER_PM(((config->specialControl & (uint32_t)kENET_QOS_MulticastAllEnable) != 0U) ? 1U :
380 0U) |
381 ENET_QOS_MAC_PACKET_FILTER_HMC(((config->specialControl & (uint32_t)kENET_QOS_HashMulticastEnable) != 0U) ? 1U :
382 0U);
383 base->MAC_PACKET_FILTER = reg;
384 /* Flow control. */
385 if ((config->specialControl & (uint32_t)kENET_QOS_FlowControlEnable) != 0U)
386 {
387 base->MAC_RX_FLOW_CTRL = ENET_QOS_MAC_RX_FLOW_CTRL_RFE_MASK | ENET_QOS_MAC_RX_FLOW_CTRL_UP_MASK;
388 base->MAC_TX_FLOW_CTRL_Q[0] = ENET_QOS_MAC_TX_FLOW_CTRL_Q_PT(config->pauseDuration);
389 }
390
391     /* Set the 1us tick counter. */
392 reg = config->csrClock_Hz / ENET_QOS_MICRSECS_ONESECOND - 1U;
393 base->MAC_ONEUS_TIC_COUNTER = ENET_QOS_MAC_ONEUS_TIC_COUNTER_TIC_1US_CNTR(reg);
394
395 /* Set the speed and duplex. */
396 reg = ENET_QOS_MAC_CONFIGURATION_DM(config->miiDuplex) | (uint32_t)config->miiSpeed |
397 ENET_QOS_MAC_CONFIGURATION_S2KP(((config->specialControl & (uint32_t)kENET_QOS_8023AS2KPacket) != 0U) ? 1U :
398 0U);
399 if (config->miiDuplex == kENET_QOS_MiiHalfDuplex)
400 {
401 reg |= ENET_QOS_MAC_CONFIGURATION_IPG(ENET_QOS_HALFDUPLEX_DEFAULTIPG);
402 }
403 base->MAC_CONFIGURATION = reg;
404
405 if (config->multiqueueCfg != NULL)
406 {
407 reg = 0U;
408 uint8_t configIndex;
409 enet_qos_multiqueue_config_t *multiqCfg = config->multiqueueCfg;
410 uint32_t txQueuePrioMap0 = base->MAC_TXQ_PRTY_MAP0;
411 uint32_t txQueuePrioMap1 = base->MAC_TXQ_PRTY_MAP1;
412 uint32_t rxQueuePrioMap0 = base->MAC_RXQ_CTRL[2];
413 uint32_t rxQueuePrioMap1 = base->MAC_RXQ_CTRL[3];
414 uint32_t rxCtrlReg1 = base->MAC_RXQ_CTRL[1];
415
416 for (uint8_t index = 0U; index < multiqCfg->txQueueUse; index++)
417 {
418 configIndex = index & 0x3U;
419
420 /* Configure tx queue priority. */
421 if (index < 4U)
422 {
423 txQueuePrioMap0 &= ~((uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << (8U * configIndex));
424 txQueuePrioMap0 |= (uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0(multiqCfg->txQueueConfig[index].priority)
425 << (8U * configIndex);
426 }
427 else
428 {
429 txQueuePrioMap1 &= ~((uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK << (8U * configIndex));
430 txQueuePrioMap1 |= (uint32_t)ENET_QOS_MAC_TXQ_PRTY_MAP0_PSTQ0(multiqCfg->txQueueConfig[index].priority)
431 << (8U * configIndex);
432 }
433 }
434
435 for (uint8_t index = 0U; index < multiqCfg->rxQueueUse; index++)
436 {
437 configIndex = index & 0x3U;
438
439 /* Configure rx queue priority. */
440 if (index < 4U)
441 {
442 rxQueuePrioMap0 &= ~((uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0_MASK << (8U * configIndex));
443 rxQueuePrioMap0 |= (uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0(multiqCfg->rxQueueConfig[index].priority)
444 << (8U * configIndex);
445 }
446 else
447 {
448 rxQueuePrioMap1 &= ~((uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0_MASK << (8U * configIndex));
449 rxQueuePrioMap1 |= (uint32_t)ENET_QOS_MAC_RXQ_CTRL_PSRQ0(multiqCfg->rxQueueConfig[index].priority)
450 << (8U * configIndex);
451 }
452
453 /* Configure queue enable mode. */
454 reg |= ENET_QOS_MAC_RXQ_CTRL_RXQ0EN((uint32_t)multiqCfg->rxQueueConfig[index].mode) << (2U * index);
455
456 /* Configure rx queue routing */
457 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketAVCPQ) != 0U)
458 {
459 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_AVCPQ_MASK;
460 rxCtrlReg1 |= (ENET_QOS_MAC_RXQ_CTRL_AVCPQ(index) | ENET_QOS_MAC_RXQ_CTRL_TACPQE_MASK);
461 }
462
463 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketPTPQ) != 0U)
464 {
465 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_PTPQ_MASK;
466 rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_PTPQ(index);
467 }
468
469 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketDCBCPQ) != 0U)
470 {
471 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_DCBCPQ_MASK;
472 rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_DCBCPQ(index);
473 }
474
475 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketUPQ) != 0U)
476 {
477 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_UPQ_MASK;
478 rxCtrlReg1 |= ENET_QOS_MAC_RXQ_CTRL_UPQ(index);
479 }
480
481 if (((uint8_t)multiqCfg->rxQueueConfig[index].packetRoute & (uint8_t)kENET_QOS_PacketMCBCQ) != 0U)
482 {
483 rxCtrlReg1 &= ~ENET_QOS_MAC_RXQ_CTRL_MCBCQ_MASK;
484 rxCtrlReg1 |= (ENET_QOS_MAC_RXQ_CTRL_MCBCQ(index) | ENET_QOS_MAC_RXQ_CTRL_MCBCQEN_MASK);
485 }
486 }
487
488 base->MAC_TXQ_PRTY_MAP0 = txQueuePrioMap0;
489 base->MAC_TXQ_PRTY_MAP1 = txQueuePrioMap1;
490 base->MAC_RXQ_CTRL[2] = rxQueuePrioMap0;
491 base->MAC_RXQ_CTRL[3] = rxQueuePrioMap1;
492 base->MAC_RXQ_CTRL[1] = rxCtrlReg1;
493 }
494 else
495 {
496 /* Configure queue enable mode. */
497 reg = ENET_QOS_MAC_RXQ_CTRL_RXQ0EN((uint32_t)kENET_QOS_DCB_Mode);
498 }
499
500 /* Enable queue. */
501 base->MAC_RXQ_CTRL[0] = reg;
502
503 /* Mask MMC counters interrupts as we don't handle
504 * them in the interrupt handler.
505 */
506 base->MAC_MMC_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
507 base->MAC_MMC_TX_INTERRUPT_MASK = 0xFFFFFFFFU;
508 base->MAC_MMC_IPC_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
509 base->MAC_MMC_FPE_RX_INTERRUPT_MASK = 0xFFFFFFFFU;
510 base->MAC_MMC_FPE_TX_INTERRUPT_MASK = 0xFFFFFFFFU;
511 }
512
513 static status_t ENET_QOS_TxDescriptorsInit(ENET_QOS_Type *base,
514 const enet_qos_buffer_config_t *bufferConfig,
515 bool intTxEnable,
516 uint8_t channel)
517 {
518 uint16_t j;
519 enet_qos_tx_bd_struct_t *txbdPtr;
520 uint32_t control = intTxEnable ? ENET_QOS_TXDESCRIP_RD_IOC_MASK : 0U;
521 const enet_qos_buffer_config_t *buffCfg = bufferConfig;
522
523 if (buffCfg == NULL)
524 {
525 return kStatus_InvalidArgument;
526 }
527
528 /* Check the ring length. */
529 if (buffCfg->txRingLen < ENET_QOS_MIN_RINGLEN)
530 {
531 return kStatus_InvalidArgument;
532 }
533 /* Set the tx descriptor start/tail pointer, shall be word aligned. */
534 base->DMA_CH[channel].DMA_CHX_TXDESC_LIST_ADDR =
535 (uint32_t)buffCfg->txDescStartAddrAlign & ENET_QOS_DMA_CHX_TXDESC_LIST_ADDR_TDESLA_MASK;
536 base->DMA_CH[channel].DMA_CHX_TXDESC_TAIL_PTR =
537 (uint32_t)buffCfg->txDescTailAddrAlign & ENET_QOS_DMA_CHX_TXDESC_TAIL_PTR_TDTP_MASK;
538 /* Set the tx ring length. */
539 base->DMA_CH[channel].DMA_CHX_TXDESC_RING_LENGTH =
540 ((uint32_t)buffCfg->txRingLen - 1U) & ENET_QOS_DMA_CHX_TXDESC_RING_LENGTH_TDRL_MASK;
541
542 /* Init the txbdPtr to the transmit descriptor start address. */
543 txbdPtr = (enet_qos_tx_bd_struct_t *)(buffCfg->txDescStartAddrAlign);
544 for (j = 0; j < buffCfg->txRingLen; j++)
545 {
546 txbdPtr->buff1Addr = 0;
547 txbdPtr->buff2Addr = 0;
548 txbdPtr->buffLen = control;
549 txbdPtr->controlStat = 0;
550 txbdPtr++;
551 }
552
553 return kStatus_Success;
554 }
555
556 static status_t ENET_QOS_RxDescriptorsInit(ENET_QOS_Type *base,
557 enet_qos_config_t *config,
558 const enet_qos_buffer_config_t *bufferConfig,
559 bool intRxEnable,
560 uint8_t channel)
561 {
562 uint16_t j;
563 uint32_t reg;
564 enet_qos_rx_bd_struct_t *rxbdPtr;
565 uint16_t index;
566 bool doubleBuffEnable = ((config->specialControl & (uint32_t)kENET_QOS_DescDoubleBuffer) != 0U) ? true : false;
567 const enet_qos_buffer_config_t *buffCfg = bufferConfig;
568 uint32_t control = ENET_QOS_RXDESCRIP_RD_BUFF1VALID_MASK;
569
570 if (buffCfg == NULL)
571 {
572 return kStatus_InvalidArgument;
573 }
574
575 if (intRxEnable)
576 {
577 control |= ENET_QOS_RXDESCRIP_RD_IOC_MASK;
578 }
579
580 if (doubleBuffEnable)
581 {
582 control |= ENET_QOS_RXDESCRIP_RD_BUFF2VALID_MASK;
583 }
584
585     /* Give descriptor ownership to the DMA here only when Rx buffers are statically provided (no zero-copy callbacks). */
586 if ((config->rxBuffAlloc == NULL) || (config->rxBuffFree == NULL))
587 {
588 control |= ENET_QOS_RXDESCRIP_WR_OWN_MASK;
589 }
590
591 /* Check the ring length. */
592 if (buffCfg->rxRingLen < ENET_QOS_MIN_RINGLEN)
593 {
594 return kStatus_InvalidArgument;
595 }
596
597 /* Set the rx descriptor start/tail pointer, shall be word aligned. */
598 base->DMA_CH[channel].DMA_CHX_RXDESC_LIST_ADDR =
599 (uint32_t)buffCfg->rxDescStartAddrAlign & ENET_QOS_DMA_CHX_RXDESC_LIST_ADDR_RDESLA_MASK;
600 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR =
601 (uint32_t)buffCfg->rxDescTailAddrAlign & ENET_QOS_DMA_CHX_RXDESC_TAIL_PTR_RDTP_MASK;
602 base->DMA_CH[channel].DMA_CHX_RXDESC_RING_LENGTH =
603 ((uint32_t)buffCfg->rxRingLen - 1U) & ENET_QOS_DMA_CHX_RXDESC_RING_LENGTH_RDRL_MASK;
604 reg = base->DMA_CH[channel].DMA_CHX_RX_CTRL & ~ENET_QOS_DMA_CHX_RX_CTRL_RBSZ_13_y_MASK;
605 reg |= ENET_QOS_DMA_CHX_RX_CTRL_RBSZ_13_y(buffCfg->rxBuffSizeAlign >> ENET_QOS_RXBUFF_IGNORELSB_BITS);
606 base->DMA_CH[channel].DMA_CHX_RX_CTRL = reg;
607
608 /* Init the rxbdPtr to the receive descriptor start address. */
609 rxbdPtr = (enet_qos_rx_bd_struct_t *)(buffCfg->rxDescStartAddrAlign);
610 for (j = 0U; j < buffCfg->rxRingLen; j++)
611 {
612 if ((config->rxBuffAlloc == NULL) || (config->rxBuffFree == NULL))
613 {
614 if (doubleBuffEnable)
615 {
616 index = 2U * j;
617 }
618 else
619 {
620 index = j;
621 }
622
623 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
624 buffCfg->rxBufferStartAddr[index] =
625 MEMORY_ConvertMemoryMapAddress((uint32_t)buffCfg->rxBufferStartAddr[index], kMEMORY_Local2DMA);
626 #endif
627 rxbdPtr->buff1Addr = buffCfg->rxBufferStartAddr[index];
628
629 /* The second buffer is set with 0 because it is not required for normal case. */
630 if (doubleBuffEnable)
631 {
632 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
633 buffCfg->rxBufferStartAddr[index + 1U] =
634 MEMORY_ConvertMemoryMapAddress((uint32_t)buffCfg->rxBufferStartAddr[index + 1U], kMEMORY_Local2DMA);
635 #endif
636 rxbdPtr->buff2Addr = buffCfg->rxBufferStartAddr[index + 1U];
637 }
638 else
639 {
640 rxbdPtr->buff2Addr = 0;
641 }
642 }
643
644 /* Set the valid and DMA own flag.*/
645 rxbdPtr->control = control;
646 rxbdPtr++;
647 }
648
649 return kStatus_Success;
650 }
651
652 static status_t ENET_QOS_SetPtp1588(ENET_QOS_Type *base, const enet_qos_config_t *config, uint32_t refClk_Hz)
653 {
654 assert(config != NULL);
655 assert(config->ptpConfig != NULL);
656 assert(refClk_Hz != 0U);
657
658 uint32_t control = 0U;
659 status_t result = kStatus_Success;
660 enet_qos_ptp_config_t *ptpConfig = config->ptpConfig;
661 uint32_t ptpClk_Hz = refClk_Hz;
662 uint32_t ssInc, snsSinc;
663
664 /* Clear the timestamp interrupt first. */
665 base->MAC_INTERRUPT_ENABLE &= ~ENET_QOS_MAC_INTERRUPT_ENABLE_TSIE_MASK;
666
667 if (ptpConfig->fineUpdateEnable)
668 {
669 control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCFUPDT_MASK;
670 ptpClk_Hz = ptpConfig->systemTimeClock_Hz; /* PTP clock 50MHz. */
671 }
672
673 /* Enable the IEEE 1588 timestamping and snapshot for event message. */
674 control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPV4ENA_MASK |
675 ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPV6ENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSENALL_MASK |
676 ENET_QOS_MAC_TIMESTAMP_CONTROL_TSEVNTENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_SNAPTYPSEL_MASK |
677 ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR(ptpConfig->tsRollover);
678
679 if (ptpConfig->ptp1588V2Enable)
680 {
681 control |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSVER2ENA_MASK | ENET_QOS_MAC_TIMESTAMP_CONTROL_TSIPENA_MASK;
682 }
683
684 /* Initialize the sub-second increment register. */
685 if (ptpConfig->tsRollover == kENET_QOS_DigitalRollover)
686 {
687 ssInc = (uint32_t)(((uint64_t)ENET_QOS_NANOSECS_ONESECOND << 8U) / ptpClk_Hz);
688 }
689 else
690 {
691 ssInc = (uint32_t)((((uint64_t)ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_TSSS_MASK + 1U) << 8U) / ptpClk_Hz);
692 }
693
694 snsSinc = ssInc & 0xFFU;
695 ssInc = (ssInc >> 8U) & 0xFFU;
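    /* Worked example (digital rollover, 50 MHz PTP reference clock):
     * ssInc = (10^9 << 8) / 50000000 = 5120 = 0x1400, so after the split above
     * SSINC = 20 (a 20 ns increment per tick) and SNSINC = 0. */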
696
697 base->MAC_TIMESTAMP_CONTROL = control;
698
699 /* Initialize the system timer. */
700 base->MAC_SYSTEM_TIME_NANOSECONDS_UPDATE = 0;
701
702 /* Set the second.*/
703 base->MAC_SYSTEM_TIME_SECONDS_UPDATE = 0;
704 base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS = 0;
705
706 /* Initialize the system timer. */
707 base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSINIT_MASK;
708
709 while ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSINIT_MASK) != 0U)
710 {
711 }
712
713 base->MAC_SUB_SECOND_INCREMENT =
714 ENET_QOS_MAC_SUB_SECOND_INCREMENT_SSINC(ssInc) | ENET_QOS_MAC_SUB_SECOND_INCREMENT_SNSINC(snsSinc);
715
716 /* Set the initial added value for the fine update. */
717 if (ptpConfig->fineUpdateEnable)
718 {
719 result = ENET_QOS_Ptp1588CorrectTimerInFine(base, ptpConfig->defaultAddend);
720 }
721
722 return result;
723 }
724
725 static inline bool ENET_QOS_TxDirtyRingAvailable(enet_qos_tx_dirty_ring_t *txDirtyRing)
726 {
727 return !txDirtyRing->isFull;
728 }
729
730 static void ENET_QOS_StoreRxFrameTime(ENET_QOS_Type *base,
731 enet_qos_handle_t *handle,
732 enet_qos_rx_bd_struct_t *rxDesc,
733 enet_qos_ptp_time_t *ts)
734 {
735 assert(ts != NULL);
736
737 uint32_t nanosecond;
738
739     /* Get the receive timestamp from the write-back descriptor. */
740 nanosecond = rxDesc->buff1Addr;
741 if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) == 0U)
742 {
743 /* Binary rollover, 0.465ns accuracy. */
744 nanosecond = (uint32_t)(((uint64_t)nanosecond * 465U) / 1000U);
745 }
746 ts->second = rxDesc->reserved;
747 ts->nanosecond = nanosecond;
748 }
749
750 uint32_t ENET_QOS_GetInstance(ENET_QOS_Type *base)
751 {
752 uint32_t instance;
753
754 /* Find the instance index from base address mappings. */
755 for (instance = 0; instance < ARRAY_SIZE(s_enetqosBases); instance++)
756 {
757 if (s_enetqosBases[instance] == base)
758 {
759 break;
760 }
761 }
762
763 assert(instance < ARRAY_SIZE(s_enetqosBases));
764
765 return instance;
766 }
767
768 /*!
769 * brief Gets the ENET default configuration structure.
770 *
771 * The purpose of this API is to get the default ENET configuration
772 * structure for ENET_QOS_Init(). User may use the initialized
773 * structure unchanged in ENET_QOS_Init(), or modify some fields of the
774 * structure before calling ENET_QOS_Init().
775 * Example:
776 code
777 enet_qos_config_t config;
778 ENET_QOS_GetDefaultConfig(&config);
779 endcode
780 * param config The ENET mac controller configuration structure pointer.
781 */
782 void ENET_QOS_GetDefaultConfig(enet_qos_config_t *config)
783 {
784 /* Checks input parameter. */
785 assert(config != NULL);
786
787 /* Initializes the configure structure to zero. */
788 (void)memset(config, 0, sizeof(*config));
789
790 /* Sets RGMII mode, full duplex, 1000Mbps for MAC and PHY data interface. */
791 config->miiMode = kENET_QOS_RgmiiMode;
792 config->miiSpeed = kENET_QOS_MiiSpeed1000M;
793 config->miiDuplex = kENET_QOS_MiiFullDuplex;
794
795 /* Sets default configuration for other options. */
796 config->specialControl = 0;
797 config->multiqueueCfg = NULL;
798 config->pauseDuration = 0;
799
800 config->ptpConfig = NULL;
801 }
802
803 /*!
804 * brief Brings up the ENET module.
805 *
806 * This function sets up the ENET MTL and MAC with the basic configuration.
807 *
808 * param base ENET peripheral base address.
809 * param config ENET mac configuration structure pointer.
810 * The "enet_qos_config_t" type mac configuration returned from ENET_QOS_GetDefaultConfig
811 * can be used directly. It is also possible to verify the Mac configuration using other methods.
812 * param macAddr ENET mac address of Ethernet device. This MAC address should be
813 * provided.
814 * param refclkSrc_Hz ENET input reference clock.
815 */
816 status_t ENET_QOS_Up(
817 ENET_QOS_Type *base, const enet_qos_config_t *config, uint8_t *macAddr, uint8_t macCount, uint32_t refclkSrc_Hz)
818 {
819 assert(config != NULL);
820 status_t result = kStatus_Success;
821
822 /* Initializes the ENET MTL with basic function. */
823 ENET_QOS_SetMTL(base, config);
824
825 /* Initializes the ENET MAC with basic function. */
826 ENET_QOS_SetMacControl(base, config, macAddr, macCount);
827
828 return result;
829 }
830
831 /*!
832 * brief Initializes the ENET module.
833 *
834 * This function ungates the module clock and initializes it with the ENET basic
835 * configuration.
836 *
837 * param base ENET peripheral base address.
838 * param config ENET mac configuration structure pointer.
839 * The "enet_qos_config_t" type mac configuration returned from ENET_QOS_GetDefaultConfig
840 * can be used directly. It is also possible to verify the Mac configuration using other methods.
841 * param macAddr ENET mac address of Ethernet device. This MAC address should be
842 * provided.
843 * param refclkSrc_Hz ENET input reference clock.
844 */
845 status_t ENET_QOS_Init(
846 ENET_QOS_Type *base, const enet_qos_config_t *config, uint8_t *macAddr, uint8_t macCount, uint32_t refclkSrc_Hz)
847 {
848 assert(config != NULL);
849
850 status_t result = kStatus_Success;
851 uint32_t instance = ENET_QOS_GetInstance(base);
852 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
853 /* Ungate ENET clock. */
854 (void)CLOCK_EnableClock(s_enetqosClock[instance]);
855 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
856
857     /* Configure the system-level control (MII mode) first. */
858 ENET_QOS_SetSYSControl(config->miiMode);
859
860 /* Initializes the ENET DMA with basic function. */
861 ENET_QOS_SetDMAControl(base, config);
862
863 (void)ENET_QOS_Up(base, config, macAddr, macCount, refclkSrc_Hz);
864
865 if (config->ptpConfig != NULL)
866 {
867 result = ENET_QOS_SetPtp1588(base, config, refclkSrc_Hz);
868 }
869
870 return result;
871 }
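/* A minimal bring-up sketch (illustrative only; ENET_QOS_0, EXAMPLE_ENET_REF_CLK_HZ,
 * the MAC address and the prepared buffer configuration are application-side assumptions):
 * code
 *  enet_qos_config_t config;
 *  uint8_t macAddr[6] = {0xd4, 0xbe, 0xd9, 0x45, 0x22, 0x60};
 *
 *  ENET_QOS_GetDefaultConfig(&config);   // RGMII, 1000 Mbps, full duplex by default
 *  ENET_QOS_Init(ENET_QOS_0, &config, &macAddr[0], 1U, EXAMPLE_ENET_REF_CLK_HZ);
 *  // Follow with ENET_QOS_EnableInterrupts(), ENET_QOS_CreateHandler(),
 *  // ENET_QOS_DescriptorInit() and ENET_QOS_StartRxTx() as documented below.
 * endcode
 */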
872
873 /*!
874 * brief Stops the ENET module.
875 *
876 * This function disables the ENET module.
877 *
878 * param base ENET peripheral base address.
879 */
880 void ENET_QOS_Down(ENET_QOS_Type *base)
881 {
882 enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
883 enet_qos_tx_bd_struct_t *txbdPtr;
884 uint8_t index;
885 uint32_t primask, j;
886
887 /* Disable all interrupts */
888 ENET_QOS_DisableInterrupts(base, 0xFF);
889
890 for (index = 0; index < handle->txQueueUse; index++)
891 {
892 enet_qos_tx_bd_ring_t *txBdRing = &handle->txBdRing[index];
893 enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[index];
894
895 /* Clear pending descriptors */
896 if (handle->callback != NULL)
897 {
898 while (txBdRing->txDescUsed > 0U)
899 {
900 enet_qos_frame_info_t *txDirty = &txDirtyRing->txDirtyBase[txDirtyRing->txConsumIdx];
901
902 txDirty->isTsAvail = false;
903
904 handle->callback(base, handle, kENET_QOS_TxIntEvent, index, handle->userData);
905
906 primask = DisableGlobalIRQ();
907 txBdRing->txDescUsed--;
908 EnableGlobalIRQ(primask);
909 }
910 }
911
912 /* Disable Tx DMA */
913 base->DMA_CH[index].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
914
915 /* Flush Tx Queue */
916 base->MTL_QUEUE[index].MTL_TXQX_OP_MODE |= ENET_QOS_MTL_TXQX_OP_MODE_FTQ_MASK;
917
918 /* Wait until Tx Queue is empty */
919 while ((base->MTL_QUEUE[index].MTL_TXQX_DBG &
920 (ENET_QOS_MTL_TXQX_DBG_TXQSTS_MASK | ENET_QOS_MTL_TXQX_DBG_PTXQ_MASK)) != 0U)
921 {
922 }
923
924 /* Reset hardware ring buffer */
925 base->DMA_CH[index].DMA_CHX_TXDESC_LIST_ADDR =
926 (uint32_t)handle->txBdRing[index].txBdBase & ENET_QOS_DMA_CHX_TXDESC_LIST_ADDR_TDESLA_MASK;
927
928 /* Reset software ring buffer */
929 handle->txBdRing[index].txGenIdx = 0;
930 handle->txBdRing[index].txConsumIdx = 0;
931 handle->txBdRing[index].txDescUsed = 0;
932
933 handle->txDirtyRing[index].txGenIdx = 0;
934 handle->txDirtyRing[index].txConsumIdx = 0;
935 handle->txDirtyRing[index].isFull = false;
936
937 txbdPtr = (enet_qos_tx_bd_struct_t *)(handle->txBdRing[index].txBdBase);
938 for (j = 0; j < handle->txBdRing[index].txRingLen; j++)
939 {
940 txbdPtr->buff1Addr = 0;
941 txbdPtr->buff2Addr = 0;
942 txbdPtr->buffLen = 0;
943 txbdPtr->controlStat = 0;
944 txbdPtr++;
945 }
946 }
947
948 /* Disable MAC Rx/Tx */
949 base->MAC_CONFIGURATION &= ~(ENET_QOS_MAC_CONFIGURATION_TE_MASK | ENET_QOS_MAC_CONFIGURATION_RE_MASK);
950
951 /* Disable Rx DMA */
952 for (index = 0; index < handle->rxQueueUse; index++)
953 {
954 base->DMA_CH[index].DMA_CHX_RX_CTRL &= ~ENET_QOS_DMA_CHX_RX_CTRL_SR_MASK;
955 }
956 }
957
958 /*!
959 * brief Deinitializes the ENET module.
960 *
961 * This function gates the module clock and disables the ENET module.
962 *
963 * param base ENET peripheral base address.
964 */
965 void ENET_QOS_Deinit(ENET_QOS_Type *base)
966 {
967     /* Reset the DMA first and wait for the reset to complete.
968      * The reset bit is cleared automatically once the reset is done. */
969 base->DMA_MODE |= ENET_QOS_DMA_MODE_SWR_MASK;
970 while ((base->DMA_MODE & ENET_QOS_DMA_MODE_SWR_MASK) != 0U)
971 {
972 }
973
974 #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL)
975 /* Disables the clock source. */
976 (void)CLOCK_DisableClock(s_enetqosClock[ENET_QOS_GetInstance(base)]);
977 #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */
978 }
979
980 /*!
981 * brief Initialize for all ENET descriptors.
982 *
983 * note This function does all tx/rx descriptor initialization. Because this API
984 * reads the interrupt enable registers first and then sets the interrupt flag for the descriptors
985 * accordingly, the descriptor initialization should be called
986 * after ENET_QOS_Init(), ENET_QOS_EnableInterrupts() and ENET_QOS_CreateHandler() (if transactional APIs
987 * are used).
988 *
989 * param base ENET peripheral base address.
990 * param config The configuration for ENET.
991 * param bufferConfig All buffers configuration.
992 */
993 status_t ENET_QOS_DescriptorInit(ENET_QOS_Type *base, enet_qos_config_t *config, enet_qos_buffer_config_t *bufferConfig)
994 {
995 assert(config != NULL);
996 assert(bufferConfig != NULL);
997
998 bool intTxEnable = false;
999 bool intRxEnable = false;
1000 uint8_t ringNum = 1;
1001 uint8_t txQueueUse = 1;
1002 uint8_t rxQueueUse = 1;
1003 uint8_t channel;
1004
1005 if (config->multiqueueCfg != NULL)
1006 {
1007 ringNum = MAX(config->multiqueueCfg->txQueueUse, config->multiqueueCfg->rxQueueUse);
1008 txQueueUse = config->multiqueueCfg->txQueueUse;
1009 rxQueueUse = config->multiqueueCfg->rxQueueUse;
1010 }
1011
1012 for (channel = 0; channel < ringNum; channel++)
1013 {
1014 intRxEnable = ((base->DMA_CH[channel].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_RIE_MASK) != 0U) ? true : false;
1015 intTxEnable = ((base->DMA_CH[channel].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_TIE_MASK) != 0U) ? true : false;
1016
1017 if (channel < txQueueUse)
1018 {
1019 if ((ENET_QOS_TxDescriptorsInit(base, bufferConfig, intTxEnable, channel) != kStatus_Success))
1020 {
1021 return kStatus_Fail;
1022 }
1023 }
1024
1025 if (channel < rxQueueUse)
1026 {
1027 if ((ENET_QOS_RxDescriptorsInit(base, config, bufferConfig, intRxEnable, channel) != kStatus_Success))
1028 {
1029 return kStatus_Fail;
1030 }
1031 }
1032
1033 bufferConfig++;
1034 }
1035 return kStatus_Success;
1036 }
1037
1038 /*!
1039 * brief Allocates Rx buffers for all BDs.
1040 * It's used for zero copy Rx. In the zero copy Rx case, Rx buffers are dynamic. This function
1041 * populates initial buffers in all BDs for receiving. Then ENET_QOS_GetRxFrame() is used
1042 * to get Rx frames with zero copy; it allocates a new buffer to replace the buffer in the BD taken
1043 * by the application, and the application should free those buffers after they're used.
1044 *
1045 * note This function should be called after ENET_QOS_CreateHandler() and buffer allocating callback
1046 * function should be ready.
1047 *
1048 * param base ENET_QOS peripheral base address.
1049 * param handle The ENET_QOS handler structure. This is the same handler pointer used in the ENET_QOS_Init.
1050 */
1051 status_t ENET_QOS_RxBufferAllocAll(ENET_QOS_Type *base, enet_qos_handle_t *handle)
1052 {
1053 status_t result = kStatus_Success;
1054 enet_qos_rx_bd_struct_t *rxbdPtr;
1055 uint32_t buffAddr;
1056 uint8_t channel;
1057 uint16_t index;
1058 uint16_t j;
1059
1060 if ((handle->rxBuffAlloc == NULL) || (handle->rxBuffFree == NULL))
1061 {
1062 return kStatus_ENET_QOS_InitMemoryFail;
1063 }
1064
1065 for (channel = 0; channel < handle->rxQueueUse; channel++)
1066 {
1067 /* Init the rxbdPtr to the receive descriptor start address. */
1068 rxbdPtr = handle->rxBdRing[channel].rxBdBase;
1069 for (j = 0U; j < handle->rxBdRing[channel].rxRingLen; j++)
1070 {
1071 if (handle->doubleBuffEnable)
1072 {
1073 index = 2U * j;
1074 }
1075 else
1076 {
1077 index = j;
1078 }
1079
1080 buffAddr = (uint32_t)(uint32_t *)handle->rxBuffAlloc(base, handle->userData, channel);
1081 if (buffAddr == 0U)
1082 {
1083 result = kStatus_ENET_QOS_InitMemoryFail;
1084 break;
1085 }
1086
1087 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1088 buffAddr = MEMORY_ConvertMemoryMapAddress(buffAddr, kMEMORY_Local2DMA);
1089 #endif
1090 rxbdPtr->buff1Addr = buffAddr;
1091 handle->rxBufferStartAddr[channel][index] = buffAddr;
1092
1093 /* The second buffer is set with 0 because it is not required for normal case. */
1094 if (handle->doubleBuffEnable)
1095 {
1096 buffAddr = (uint32_t)(uint32_t *)handle->rxBuffAlloc(base, handle->userData, channel);
1097 if (buffAddr == 0U)
1098 {
1099 result = kStatus_ENET_QOS_InitMemoryFail;
1100 break;
1101 }
1102
1103 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1104 buffAddr = MEMORY_ConvertMemoryMapAddress(buffAddr, kMEMORY_Local2DMA);
1105 #endif
1106 rxbdPtr->buff2Addr = buffAddr;
1107 handle->rxBufferStartAddr[channel][index + 1U] = buffAddr;
1108 }
1109 else
1110 {
1111 rxbdPtr->buff2Addr = 0;
1112 }
1113
1114 /* Set the valid and DMA own flag.*/
1115 rxbdPtr->control |= ENET_QOS_RXDESCRIP_WR_OWN_MASK;
1116 rxbdPtr++;
1117 }
1118 }
1119
1120 if (result == kStatus_ENET_QOS_InitMemoryFail)
1121 {
1122 ENET_QOS_RxBufferFreeAll(base, handle);
1123 }
1124
1125 return result;
1126 }
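/* A zero-copy buffer callback sketch, inferred from the call sites above (the
 * exact callback typedefs live in fsl_enet_qos.h); the buffer pool helpers
 * APP_PoolGet()/APP_PoolPut() are application-side assumptions:
 * code
 *  void *APP_RxBuffAlloc(ENET_QOS_Type *base, void *userData, uint8_t channel)
 *  {
 *      return APP_PoolGet();  // returning NULL makes this API fail with kStatus_ENET_QOS_InitMemoryFail
 *  }
 *
 *  void APP_RxBuffFree(ENET_QOS_Type *base, void *buffer, void *userData, uint8_t channel)
 *  {
 *      APP_PoolPut(buffer);
 *  }
 *
 *  // Register them in the configuration before ENET_QOS_Init()/ENET_QOS_CreateHandler():
 *  // config.rxBuffAlloc = APP_RxBuffAlloc;
 *  // config.rxBuffFree  = APP_RxBuffFree;
 * endcode
 */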
1127
1128 /*!
1129 * brief Frees Rx buffers in all BDs.
1130 * It's used for zero copy Rx. In zero copy Rx case, Rx buffers are dynamic. This function
1131 * will free left buffers in all BDs.
1132 *
1133 * param base ENET_QOS peripheral base address.
1134 * param handle The ENET_QOS handler structure. This is the same handler pointer used in the ENET_QOS_Init.
1135 */
1136 void ENET_QOS_RxBufferFreeAll(ENET_QOS_Type *base, enet_qos_handle_t *handle)
1137 {
1138 uint32_t buffAddr;
1139 uint8_t channel;
1140 uint16_t index;
1141 uint16_t j;
1142
1143 if (handle->rxBuffFree != NULL)
1144 {
1145 for (channel = 0; channel < handle->rxQueueUse; channel++)
1146 {
1147 for (j = 0U; j < handle->rxBdRing[channel].rxRingLen; j++)
1148 {
1149 if (handle->doubleBuffEnable)
1150 {
1151 index = 2U * j;
1152 }
1153 else
1154 {
1155 index = j;
1156 }
1157
1158 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1159 buffAddr = MEMORY_ConvertMemoryMapAddress((uint32_t)handle->rxBufferStartAddr[channel][index],
1160 kMEMORY_DMA2Local);
1161 #else
1162 buffAddr = (uint32_t)handle->rxBufferStartAddr[channel][index];
1163 #endif
1164 if (buffAddr != 0U)
1165 {
1166 handle->rxBuffFree(base, (void *)(uint32_t *)buffAddr, handle->userData, channel);
1167 }
1168
1169 /* The second buffer is set with 0 because it is not required for normal case. */
1170 if (handle->doubleBuffEnable)
1171 {
1172 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
1173 buffAddr = MEMORY_ConvertMemoryMapAddress((uint32_t)handle->rxBufferStartAddr[channel][index + 1],
1174 kMEMORY_DMA2Local);
1175 #else
1176 buffAddr = (uint32_t)handle->rxBufferStartAddr[channel][index + 1U];
1177 #endif
1178 if (buffAddr != 0U)
1179 {
1180 handle->rxBuffFree(base, (void *)(uint32_t *)buffAddr, handle->userData, channel);
1181 }
1182 }
1183 }
1184 }
1185 }
1186 }
1187
1188 /*!
1189 * brief Starts the ENET rx/tx.
1190 * This function enables the tx/rx and starts the rx/tx DMA.
1191 * It shall be called after ENET initialization and before
1192 * starting to receive the data.
1193 *
1194 * param base ENET peripheral base address.
1195 * param rxRingNum The number of the used rx rings. It shall not be
1196 * larger than the ENET_QOS_RING_NUM_MAX(2). If the ringNum is set with
1197 * 1, the ring 0 will be used.
1198 * param txRingNum The number of the used tx rings. It shall not be
1199 * larger than the ENET_QOS_RING_NUM_MAX(2). If the ringNum is set with
1200 * 1, the ring 0 will be used.
1201 *
1202 * note This must be called after all the ENET initialization,
1203 * and should be called when the ENET receive/transmit is required.
1204 */
1205 void ENET_QOS_StartRxTx(ENET_QOS_Type *base, uint8_t txRingNum, uint8_t rxRingNum)
1206 {
1207 assert(txRingNum != 0U);
1208 assert(rxRingNum != 0U);
1209
1210 uint8_t index;
1211
1212 if (txRingNum > ENET_QOS_RING_NUM_MAX)
1213 {
1214 txRingNum = ENET_QOS_RING_NUM_MAX;
1215 }
1216 if (rxRingNum > ENET_QOS_RING_NUM_MAX)
1217 {
1218 rxRingNum = ENET_QOS_RING_NUM_MAX;
1219 }
1220     /* Start/Activate the DMA first. */
1221 for (index = 0; index < rxRingNum; index++)
1222 {
1223 base->DMA_CH[index].DMA_CHX_RX_CTRL |= ENET_QOS_DMA_CHX_RX_CTRL_SR_MASK;
1224 }
1225 for (index = 0; index < txRingNum; index++)
1226 {
1227 base->DMA_CH[index].DMA_CHX_TX_CTRL |= ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
1228 }
1229
1230 /* Enable the RX and TX at same time. */
1231 base->MAC_CONFIGURATION |= (ENET_QOS_MAC_CONFIGURATION_TE_MASK | ENET_QOS_MAC_CONFIGURATION_RE_MASK);
1232 }
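/* Typical final bring-up step after descriptor and buffer setup (the ENET_QOS_0
 * base symbol is an illustrative assumption):
 * code
 *  ENET_QOS_StartRxTx(ENET_QOS_0, 1U, 1U);  // one tx ring and one rx ring -> ring 0 only
 * endcode
 */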
1233
1234 /*!
1235 * brief Enables the ENET DMA and MAC interrupts.
1236 *
1237 * This function enables the ENET interrupt according to the provided mask. The mask
1238 * is a logical OR of enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1239 * For example, to enable the dma and mac interrupt, do the following.
1240 * code
1241 * ENET_QOS_EnableInterrupts(ENET, kENET_QOS_DmaRx | kENET_QOS_DmaTx | kENET_QOS_MacPmt);
1242 * endcode
1243 *
1244 * param base ENET peripheral base address.
1245 * param mask ENET interrupts to enable. This is a logical OR of both
1246 * enumeration :: enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1247 */
1248 void ENET_QOS_EnableInterrupts(ENET_QOS_Type *base, uint32_t mask)
1249 {
1250 uint32_t interrupt = mask & 0xFFFFU;
1251 uint8_t index;
1252
1253 /* For dma interrupt. */
1254 if (interrupt != 0U)
1255 {
1256 for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
1257 {
1258 /* Set for all abnormal interrupts. */
1259 if ((ENET_QOS_ABNORM_INT_MASK & interrupt) != 0U)
1260 {
1261 interrupt |= ENET_QOS_DMA_CHX_INT_EN_AIE_MASK;
1262 }
1263 /* Set for all normal interrupts. */
1264 if ((ENET_QOS_NORM_INT_MASK & interrupt) != 0U)
1265 {
1266 interrupt |= ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
1267 }
1268 base->DMA_CH[index].DMA_CHX_INT_EN = interrupt;
1269 }
1270 }
1271 interrupt = mask >> ENET_QOS_MACINT_ENUM_OFFSET;
1272 if (interrupt != 0U)
1273 {
1274 /* MAC interrupt */
1275 base->MAC_INTERRUPT_ENABLE |= interrupt;
1276 }
1277 }
1278
1279 /*!
1280 * brief Clears the ENET mac interrupt events status flag.
1281 *
1282 * This function clears enabled ENET interrupts according to the provided mask. The mask
1283 * is a logical OR of enumeration members. See the ref enet_qos_mac_interrupt_enable_t.
1284 * For example, to clear the TX frame interrupt and RX frame interrupt, do the following.
1285 * code
1286 * ENET_QOS_ClearMacInterruptStatus(ENET, kENET_QOS_MacPmt);
1287 * endcode
1288 *
1289 * param base ENET peripheral base address.
1290 * param mask ENET interrupt source to be cleared.
1291 * This is the logical OR of members of the enumeration :: enet_qos_mac_interrupt_enable_t.
1292 */
1293 void ENET_QOS_ClearMacInterruptStatus(ENET_QOS_Type *base, uint32_t mask)
1294 {
1295 volatile uint32_t dummy;
1296
1297 if ((mask & (uint32_t)kENET_QOS_MacTimestamp) != 0U)
1298 {
1299 dummy = base->MAC_TIMESTAMP_STATUS;
1300 }
1301 else if ((mask & (uint32_t)kENET_QOS_MacPmt) != 0U)
1302 {
1303 dummy = base->MAC_PMT_CONTROL_STATUS;
1304 }
1305 else
1306 {
1307         /* Empty else to satisfy MISRA-C:2004 rule 14.10. */
1308 }
1309 (void)dummy;
1310 }
1311
1312 /*!
1313 * brief Disables the ENET DMA and MAC interrupts.
1314 *
1315 * This function disables the ENET interrupt according to the provided mask. The mask
1316 * is a logical OR of enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1317 * For example, to disable the dma and mac interrupt, do the following.
1318 * code
1319 * ENET_QOS_DisableInterrupts(ENET, kENET_QOS_DmaRx | kENET_QOS_DmaTx | kENET_QOS_MacPmt);
1320 * endcode
1321 *
1322 * param base ENET peripheral base address.
1323 * param mask ENET interrupts to disables. This is a logical OR of both
1324 * enumeration :: enet_qos_dma_interrupt_enable_t and enet_qos_mac_interrupt_enable_t.
1325 */
1326 void ENET_QOS_DisableInterrupts(ENET_QOS_Type *base, uint32_t mask)
1327 {
1328 uint32_t interrupt = mask & 0xFFFFU;
1329 uint8_t index;
1330
1331 /* For dma interrupt. */
1332 if (interrupt != 0U)
1333 {
1334 for (index = 0; index < ENET_QOS_RING_NUM_MAX; index++)
1335 {
1336 /* Set for all abnormal interrupts. */
1337 if ((ENET_QOS_ABNORM_INT_MASK & interrupt) != 0U)
1338 {
1339 interrupt |= ENET_QOS_DMA_CHX_INT_EN_AIE_MASK;
1340 }
1341 /* Set for all normal interrupts. */
1342 if ((ENET_QOS_NORM_INT_MASK & interrupt) != 0U)
1343 {
1344 interrupt |= ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
1345 }
1346 base->DMA_CH[index].DMA_CHX_INT_EN &= ~interrupt;
1347 }
1348 }
1349 interrupt = mask >> ENET_QOS_MACINT_ENUM_OFFSET;
1350 if (interrupt != 0U)
1351 {
1352 /* MAC interrupt */
1353 base->MAC_INTERRUPT_ENABLE &= ~interrupt;
1354 }
1355 }
1356
1357 /*!
1358 * @brief Set the second level IRQ handler, allow user to overwrite the default
1359 * second level weak IRQ handler.
1360 *
1361 * @param ISRHandler The handler to install.
1362 */
1363 void ENET_QOS_SetISRHandler(ENET_QOS_Type *base, enet_qos_isr_t ISRHandler)
1364 {
1365 /* Update IRQ entry. */
1366 s_enetqosIsr = ISRHandler;
1367 /* Enable NVIC. */
1368 (void)EnableIRQ(s_enetqosIrqId[ENET_QOS_GetInstance(base)]);
1369 }
1370
1371 /*!
1372 * brief Create ENET Handler
1373 *
1374 * This is a transactional API and it's provided to store all data which are needed
1375 * during the whole transactional process. This API should not be used when you use
1376 * functional APIs to do data tx/rx. This function stores many data/flags for
1377 * transactional use, so it is used together with the configuration APIs such as ENET_QOS_Init(),
1378 * ENET_QOS_DescriptorInit(), ENET_QOS_EnableInterrupts() etc.
1379 *
1380 * note Because the transactional transmit APIs use the zero-copy transmit buffer,
1381 * there are two things to emphasize here:
1382 * 1. Tx buffer free/requeue for the application should be done in the tx
1383 * interrupt handler. Please set the callback for kENET_QOS_TxIntEvent with tx buffer free/requeue
1384 * process APIs.
1385 * 2. The tx interrupt is forced on.
1386 *
1387 * param base ENET peripheral base address.
1388 * param handle ENET handler.
1389 * param config ENET configuration.
1390 * param bufferConfig ENET buffer configuration.
1391 * param callback The callback function.
1392 * param userData The application data.
1393 */
1394 void ENET_QOS_CreateHandler(ENET_QOS_Type *base,
1395 enet_qos_handle_t *handle,
1396 enet_qos_config_t *config,
1397 enet_qos_buffer_config_t *bufferConfig,
1398 enet_qos_callback_t callback,
1399 void *userData)
1400 {
1401 assert(config != NULL);
1402 assert(bufferConfig != NULL);
1403 assert(callback != NULL);
1404
1405 uint8_t ringNum = 1;
1406 uint8_t count = 0;
1407 uint32_t rxIntEnable = 0;
1408 uint8_t txQueueUse = 1;
1409 uint8_t rxQueueUse = 1;
1410 enet_qos_buffer_config_t *buffConfig = bufferConfig;
1411
1412 /* Store transfer parameters in handle pointer. */
1413 (void)memset(handle, 0, sizeof(enet_qos_handle_t));
1414
1415 if (config->multiqueueCfg != NULL)
1416 {
1417 txQueueUse = config->multiqueueCfg->txQueueUse;
1418 rxQueueUse = config->multiqueueCfg->rxQueueUse;
1419 ringNum = MAX(txQueueUse, rxQueueUse);
1420 }
1421
1422 handle->txQueueUse = txQueueUse;
1423 handle->rxQueueUse = rxQueueUse;
1424
1425 if ((config->specialControl & (uint32_t)kENET_QOS_DescDoubleBuffer) != 0U)
1426 {
1427 handle->doubleBuffEnable = true;
1428 }
1429
1430 for (count = 0; count < ringNum; count++)
1431 {
1432 if (count < txQueueUse)
1433 {
1434 handle->txBdRing[count].txBdBase = buffConfig->txDescStartAddrAlign;
1435 handle->txBdRing[count].txRingLen = buffConfig->txRingLen;
1436 handle->txBdRing[count].txGenIdx = 0;
1437 handle->txBdRing[count].txConsumIdx = 0;
1438 handle->txBdRing[count].txDescUsed = 0;
1439
1440 handle->txDirtyRing[count].txDirtyBase = buffConfig->txDirtyStartAddr;
1441 handle->txDirtyRing[count].txRingLen = buffConfig->txRingLen;
1442 handle->txDirtyRing[count].txGenIdx = 0;
1443 handle->txDirtyRing[count].txConsumIdx = 0;
1444
1445 /* Enable tx interrupt for use transactional API to do tx buffer free/requeue. */
1446 base->DMA_CH[count].DMA_CHX_INT_EN |= ENET_QOS_DMA_CHX_INT_EN_TIE_MASK | ENET_QOS_DMA_CHX_INT_EN_NIE_MASK;
1447 }
1448
1449 if (count < rxQueueUse)
1450 {
1451 handle->rxBdRing[count].rxBdBase = buffConfig->rxDescStartAddrAlign;
1452 handle->rxBdRing[count].rxGenIdx = 0;
1453 handle->rxBdRing[count].rxRingLen = buffConfig->rxRingLen;
1454 handle->rxBdRing[count].rxBuffSizeAlign = buffConfig->rxBuffSizeAlign;
1455
1456 /* Record rx buffer address for re-init Rx buffer descriptor */
1457 handle->rxBufferStartAddr[count] = buffConfig->rxBufferStartAddr;
1458
1459 /* Record rx buffer need cache maintain */
1460 handle->rxMaintainEnable[count] = buffConfig->rxBuffNeedMaintain;
1461
1462             /* Check if the rx interrupt is enabled. */
1463 rxIntEnable |= (base->DMA_CH[count].DMA_CHX_INT_EN & ENET_QOS_DMA_CHX_INT_EN_RIE_MASK);
1464 }
1465
1466 buffConfig++;
1467 }
1468
1469 handle->rxintEnable = (rxIntEnable != 0U) ? true : false;
1470
1471 /* Save the handle pointer in the global variables. */
1472 s_ENETHandle[ENET_QOS_GetInstance(base)] = handle;
1473
1474 /* Set Rx alloc/free callback. */
1475 handle->rxBuffAlloc = config->rxBuffAlloc;
1476 handle->rxBuffFree = config->rxBuffFree;
1477
1478 /* Set callback and userData. */
1479 handle->callback = callback;
1480 handle->userData = userData;
1481
1482 /* Use default ENET_QOS_CommonIRQHandler as default weak IRQ handler. */
1483 ENET_QOS_SetISRHandler(base, ENET_QOS_CommonIRQHandler);
1484 }
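/* A callback sketch matching the invocation used by this driver
 * (base, handle, event, channel, userData). The event parameter type name and the
 * APP_ReclaimTxBuffers() helper are assumptions; see the enet_qos_callback_t
 * typedef in fsl_enet_qos.h for the exact signature:
 * code
 *  void APP_EnetCallback(ENET_QOS_Type *base, enet_qos_handle_t *handle,
 *                        enet_qos_event_t event, uint8_t channel, void *userData)
 *  {
 *      if (event == kENET_QOS_TxIntEvent)
 *      {
 *          APP_ReclaimTxBuffers(channel); // free/requeue the zero-copy tx buffers here
 *      }
 *  }
 *
 *  ENET_QOS_CreateHandler(ENET_QOS_0, &g_handle, &config, &bufferConfig, APP_EnetCallback, NULL);
 * endcode
 */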
1485
1486 /*!
1487 * brief Gets the ENET module Mac address.
1488 *
1489 * param base ENET peripheral base address.
1490 * param macAddr The six-byte Mac address pointer.
1491 * The pointer is allocated by application and input into the API.
1492 */
1493 void ENET_QOS_GetMacAddr(ENET_QOS_Type *base, uint8_t *macAddr, uint8_t index)
1494 {
1495 assert(macAddr != NULL);
1496
1497 uint32_t address = base->MAC_ADDRESS[index].LOW;
1498
1499 /* Get from physical address lower register. */
1500 macAddr[2] = (uint8_t)(0xFFU & (address >> 24U));
1501 macAddr[3] = (uint8_t)(0xFFU & (address >> 16U));
1502 macAddr[4] = (uint8_t)(0xFFU & (address >> 8U));
1503 macAddr[5] = (uint8_t)(0xFFU & address);
1504
1505 /* Get from physical address high register. */
1506 address = base->MAC_ADDRESS[index].HIGH;
1507 macAddr[0] = (uint8_t)(0xFFU & (address >> 8U));
1508 macAddr[1] = (uint8_t)(0xFFU & address);
1509 }
1510
1511 /*!
1512 * brief Adds the ENET_QOS device to a multicast group.
1513 *
1514 * param base ENET_QOS peripheral base address.
1515 * param address The six-byte multicast group address which is provided by application.
1516 */
1517 void ENET_QOS_AddMulticastGroup(ENET_QOS_Type *base, uint8_t *address)
1518 {
1519 assert(address != NULL);
1520
1521 enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
1522 uint32_t crc = 0xFFFFFFFFU;
1523 uint32_t count1 = 0;
1524 uint32_t count2 = 0;
1525
1526 /* Calculates the CRC-32 polynomial on the multicast group address. */
1527 for (count1 = 0; count1 < 6U; count1++)
1528 {
1529 uint8_t c = address[count1];
1530 for (count2 = 0; count2 < 0x08U; count2++)
1531 {
1532 if (((c ^ crc) & 1U) != 0U)
1533 {
1534 crc >>= 1U;
1535 c >>= 1U;
1536 crc ^= 0xEDB88320U;
1537 }
1538 else
1539 {
1540 crc >>= 1U;
1541 c >>= 1U;
1542 }
1543 }
1544 }
1545
1546 /* Calculate bitwise reverse value. */
1547 crc = ENET_QOS_ReverseBits(~crc);
1548
1549 /* Get highest 6 bits*/
1550 crc = crc >> 26U;
1551
1552 handle->multicastCount[crc]++;
1553
1554 if (0U != (crc & 0x20U))
1555 {
1556 base->MAC_HASH_TABLE_REG1 |= (1UL << (crc & 0x1FU));
1557 }
1558 else
1559 {
1560 base->MAC_HASH_TABLE_REG0 |= (1UL << (crc & 0x1FU));
1561 }
1562 }
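/* Joining a group (the address and the ENET_QOS_0 base symbol are illustrative). The
 * hash bit is reference counted, so ENET_QOS_LeaveMulticastGroup() below only
 * clears it when the last user of that hash bucket leaves:
 * code
 *  uint8_t mcastMac[6] = {0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB};
 *  ENET_QOS_AddMulticastGroup(ENET_QOS_0, &mcastMac[0]);
 * endcode
 */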
1563
1564 /*!
1565 * brief Removes the ENET_QOS device from a multicast group.
1566 *
1567 * param base ENET_QOS peripheral base address.
1568 * param address The six-byte multicast group address which is provided by application.
1569 */
1570 void ENET_QOS_LeaveMulticastGroup(ENET_QOS_Type *base, uint8_t *address)
1571 {
1572 assert(address != NULL);
1573
1574 enet_qos_handle_t *handle = s_ENETHandle[ENET_QOS_GetInstance(base)];
1575 uint32_t crc = 0xFFFFFFFFU;
1576 uint32_t count1 = 0;
1577 uint32_t count2 = 0;
1578
1579 /* Calculates the CRC-32 polynomial on the multicast group address. */
1580 for (count1 = 0; count1 < 6U; count1++)
1581 {
1582 uint8_t c = address[count1];
1583 for (count2 = 0; count2 < 0x08U; count2++)
1584 {
1585 if (((c ^ crc) & 1U) != 0U)
1586 {
1587 crc >>= 1U;
1588 c >>= 1U;
1589 crc ^= 0xEDB88320U;
1590 }
1591 else
1592 {
1593 crc >>= 1U;
1594 c >>= 1U;
1595 }
1596 }
1597 }
1598
1599 /* Calculate bitwise reverse value. */
1600 crc = ENET_QOS_ReverseBits(~crc);
1601
1602     /* Get the highest 6 bits. */
1603 crc = crc >> 26U;
1604
1605 handle->multicastCount[crc]--;
1606
1607 /* Set the hash table if no collisions */
1608 if (0U == handle->multicastCount[crc])
1609 {
1610 if (0U != (crc & 0x20U))
1611 {
1612 base->MAC_HASH_TABLE_REG1 &= ~((1UL << (crc & 0x1FU)));
1613 }
1614 else
1615 {
1616 base->MAC_HASH_TABLE_REG0 &= ~((1UL << (crc & 0x1FU)));
1617 }
1618 }
1619 }
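
/*
 * Usage sketch (illustrative, not part of the driver): join and later leave a multicast group.
 * The 01:00:5E:00:00:FB address is the IPv4 mDNS group mapping and is only an example value;
 * EXAMPLE_ENET_QOS_BASE is a placeholder for the peripheral base pointer.
 * code
 *    uint8_t mdnsMac[6] = {0x01U, 0x00U, 0x5EU, 0x00U, 0x00U, 0xFBU};
 *
 *    ENET_QOS_AddMulticastGroup(EXAMPLE_ENET_QOS_BASE, mdnsMac);
 *    // ... receive multicast traffic ...
 *    ENET_QOS_LeaveMulticastGroup(EXAMPLE_ENET_QOS_BASE, mdnsMac);
 * endcode
 */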
1620
1621 /*!
1622  * brief Sets the ENET SMI (serial management interface) - MII management interface.
1623  *
1624  * param base ENET peripheral base address.
 * param csrClock_Hz The CSR clock frequency in Hz, used to select the MDC clock divider.
1625 */
1626 void ENET_QOS_SetSMI(ENET_QOS_Type *base, uint32_t csrClock_Hz)
1627 {
1628 uint32_t crDiv = 0;
1629 uint32_t srcClock_Hz = csrClock_Hz / 1000000U;
1630
1631 assert((srcClock_Hz >= 20U) && (srcClock_Hz < 800U));
1632
1633 if (srcClock_Hz < 35U)
1634 {
1635 crDiv = 2;
1636 }
1637 else if (srcClock_Hz < 60U)
1638 {
1639 crDiv = 3;
1640 }
1641 else if (srcClock_Hz < 100U)
1642 {
1643 crDiv = 0;
1644 }
1645 else if (srcClock_Hz < 150U)
1646 {
1647 crDiv = 1;
1648 }
1649 else if (srcClock_Hz < 250U)
1650 {
1651 crDiv = 4;
1652 }
1653 else if (srcClock_Hz < 300U)
1654 {
1655 crDiv = 5;
1656 }
1657 else if (srcClock_Hz < 500U)
1658 {
1659 crDiv = 6;
1660 }
1661 else if (srcClock_Hz < 800U)
1662 {
1663 crDiv = 7;
1664 }
1665 else
1666 {
1667 /* Empty else */
1668 }
1669
1670 base->MAC_MDIO_ADDRESS = ENET_QOS_MAC_MDIO_ADDRESS_CR(crDiv);
1671 }
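
/*
 * Usage sketch (illustrative, not part of the driver): program the MDC clock divider from the
 * CSR clock before any PHY access. With a 250 MHz CSR clock the table above selects crDiv = 5.
 * EXAMPLE_ENET_QOS_BASE and EXAMPLE_CSR_CLOCK_HZ are placeholders for board-specific values.
 * code
 *    ENET_QOS_SetSMI(EXAMPLE_ENET_QOS_BASE, EXAMPLE_CSR_CLOCK_HZ); // e.g. 250000000U
 * endcode
 */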
1672
1673 /*!
1674  * brief Starts an SMI write command.
1675  * It supports MDIO IEEE802.3 Clause 22.
1676  * After sending the command, the user needs to check whether the transmission is over
1677  * with ENET_QOS_IsSMIBusy().
1678 *
1679 * param base ENET peripheral base address.
1680 * param phyAddr The PHY address.
1681 * param phyReg The PHY register.
1682 * param data The data written to PHY.
1683 */
1684 void ENET_QOS_StartSMIWrite(ENET_QOS_Type *base, uint32_t phyAddr, uint32_t phyReg, uint32_t data)
1685 {
1686 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1687
1688 /* Build MII write command. */
1689 base->MAC_MDIO_ADDRESS = reg | (uint32_t)kENET_QOS_MiiWriteFrame | ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) |
1690 ENET_QOS_MAC_MDIO_ADDRESS_RDA(phyReg);
1691 base->MAC_MDIO_DATA = data;
1692 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1693 }
1694
1695 /*!
1696 * brief Starts an SMI read command.
1697 * It supports MDIO IEEE802.3 Clause 22.
1698  * After sending the command, the user needs to check whether the transmission is over
1699 * with ENET_QOS_IsSMIBusy().
1700 *
1701 * param base ENET peripheral base address.
1702 * param phyAddr The PHY address.
1703 * param phyReg The PHY register.
1704 */
1705 void ENET_QOS_StartSMIRead(ENET_QOS_Type *base, uint32_t phyAddr, uint32_t phyReg)
1706 {
1707 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1708
1709 /* Build MII read command. */
1710 base->MAC_MDIO_ADDRESS = reg | (uint32_t)kENET_QOS_MiiReadFrame | ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) |
1711 ENET_QOS_MAC_MDIO_ADDRESS_RDA(phyReg);
1712 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1713 }
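
/*
 * Usage sketch (illustrative, not part of the driver): a blocking Clause 22 PHY register read
 * built on ENET_QOS_StartSMIRead(). ENET_QOS_IsSMIBusy() is the polling helper referenced in the
 * comments above; ENET_QOS_ReadSMIData() is assumed to be the data accessor from fsl_enet_qos.h.
 * code
 *    static uint16_t EXAMPLE_MdioRead(ENET_QOS_Type *base, uint32_t phyAddr, uint32_t phyReg)
 *    {
 *        ENET_QOS_StartSMIRead(base, phyAddr, phyReg);
 *        while (ENET_QOS_IsSMIBusy(base))
 *        {
 *        }
 *        return (uint16_t)ENET_QOS_ReadSMIData(base);
 *    }
 * endcode
 */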
1714
1715 /*!
1716  * brief Starts an SMI write command.
1717  * It supports MDIO IEEE802.3 Clause 45.
1718  * After sending the command, the user needs to check whether the transmission is over
1719 * with ENET_QOS_IsSMIBusy().
1720 *
1721 * param base ENET peripheral base address.
1722 * param phyAddr The PHY address.
1723 * param device The PHY device type.
1724 * param phyReg The PHY register address.
1725 * param data The data written to PHY.
1726 */
1727 void ENET_QOS_StartExtC45SMIWrite(
1728 ENET_QOS_Type *base, uint32_t phyAddr, uint32_t device, uint32_t phyReg, uint32_t data)
1729 {
1730 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1731
1732 /* Build MII write command. */
1733 base->MAC_MDIO_ADDRESS = reg | ENET_QOS_MAC_MDIO_ADDRESS_C45E_MASK | (uint32_t)kENET_QOS_MiiWriteFrame |
1734 ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) | ENET_QOS_MAC_MDIO_ADDRESS_RDA(device);
1735 base->MAC_MDIO_DATA = data | ENET_QOS_MAC_MDIO_DATA_RA(phyReg);
1736 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1737 }
1738
1739 /*!
1740  * brief Starts an SMI read command.
1741  * It supports MDIO IEEE802.3 Clause 45.
1742  * After sending the command, the user needs to check whether the transmission is over
1743 * with ENET_QOS_IsSMIBusy().
1744 *
1745 * param base ENET peripheral base address.
1746 * param phyAddr The PHY address.
1747 * param device The PHY device type.
1748 * param phyReg The PHY register address.
1749 */
1750 void ENET_QOS_StartExtC45SMIRead(ENET_QOS_Type *base, uint32_t phyAddr, uint32_t device, uint32_t phyReg)
1751 {
1752 uint32_t reg = base->MAC_MDIO_ADDRESS & ENET_QOS_MAC_MDIO_ADDRESS_CR_MASK;
1753
1754 /* Build MII read command. */
1755 base->MAC_MDIO_ADDRESS = reg | ENET_QOS_MAC_MDIO_ADDRESS_C45E_MASK | (uint32_t)kENET_QOS_MiiReadFrame |
1756 ENET_QOS_MAC_MDIO_ADDRESS_PA(phyAddr) | ENET_QOS_MAC_MDIO_ADDRESS_RDA(device);
1757 base->MAC_MDIO_DATA = ENET_QOS_MAC_MDIO_DATA_RA(phyReg);
1758 base->MAC_MDIO_ADDRESS |= ENET_QOS_MAC_MDIO_ADDRESS_GB_MASK;
1759 }
1760
1761 /*!
1762  * brief Sets the MAC to enter the power down mode.
1763  * The remote wake-up frame and magic frame can wake up
1764  * the ENET from the power down mode.
1765  *
1766  * param base ENET peripheral base address.
1767  * param wakeFilter The wakeFilter provided to configure the wake-up frame filter.
1768  * wakeFilter can be set to NULL if no filter is required. If you do have a filter requirement,
1769  * make sure the wakeFilter pointer points to eight contiguous
1770  * 32-bit configuration words.
1771 */
1772 void ENET_QOS_EnterPowerDown(ENET_QOS_Type *base, uint32_t *wakeFilter)
1773 {
1774 uint8_t index;
1775 uint32_t *reg = wakeFilter;
1776
1777 /* Disable the tx dma. */
1778 base->DMA_CH[0].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
1779 base->DMA_CH[1].DMA_CHX_TX_CTRL &= ~ENET_QOS_DMA_CHX_TX_CTRL_ST_MASK;
1780
1781 /* Disable the mac tx/rx. */
1782 base->MAC_CONFIGURATION &= ~(ENET_QOS_MAC_CONFIGURATION_RE_MASK | ENET_QOS_MAC_CONFIGURATION_TE_MASK);
1783 /* Enable the remote wakeup packet and enable the power down mode. */
1784 if (wakeFilter != NULL)
1785 {
1786 for (index = 0; index < ENET_QOS_WAKEUPFILTER_NUM; index++)
1787 {
1788 base->MAC_RWK_PACKET_FILTER = *reg;
1789 reg++;
1790 }
1791 }
1792 base->MAC_PMT_CONTROL_STATUS = ENET_QOS_MAC_PMT_CONTROL_STATUS_MGKPKTEN_MASK |
1793 ENET_QOS_MAC_PMT_CONTROL_STATUS_RWKPKTEN_MASK |
1794 ENET_QOS_MAC_PMT_CONTROL_STATUS_PWRDWN_MASK;
1795
1796 /* Enable the MAC rx. */
1797 base->MAC_CONFIGURATION |= ENET_QOS_MAC_CONFIGURATION_RE_MASK;
1798 }
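
/*
 * Usage sketch (illustrative, not part of the driver): enter power down with magic-packet and
 * remote wake-up packet detection enabled and no dedicated wake-up frame filter. Passing NULL
 * skips programming the ENET_QOS_WAKEUPFILTER_NUM filter words; a non-NULL pointer must
 * reference eight contiguous 32-bit filter configuration words.
 * code
 *    ENET_QOS_EnterPowerDown(EXAMPLE_ENET_QOS_BASE, NULL);
 *    // ... wait for the PMT wake-up event, then restore normal transmit/receive operation ...
 * endcode
 */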
1799
1800 /*!
1801  * brief Enables/Disables the Rx parser. Note that before enabling/disabling the Rx parser,
1802  * it is better to disable the receive function first.
1803 *
1804 * param base ENET_QOS peripheral base address.
1805 * param enable Enable/Disable Rx parser function
1806 */
1807 status_t ENET_QOS_EnableRxParser(ENET_QOS_Type *base, bool enable)
1808 {
1809 status_t result = kStatus_Success;
1810
1811 if (enable)
1812 {
1813 base->MTL_OPERATION_MODE |= ENET_QOS_MTL_OPERATION_MODE_FRPE_MASK;
1814 }
1815 else
1816 {
1817 base->MTL_OPERATION_MODE &= ~ENET_QOS_MTL_OPERATION_MODE_FRPE_MASK;
1818 result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_CONTROL_STATUS), ENET_QOS_MTL_RXP_CONTROL_STATUS_RXPI_MASK,
1819 ENET_QOS_MTL_RXP_CONTROL_STATUS_RXPI_MASK);
1820 }
1821
1822 return result;
1823 }
1824
1825 /*!
1826 * brief Gets the size of the read frame.
1827 * This function gets a received frame size from the ENET buffer descriptors.
1828 * note The FCS of the frame is automatically removed by MAC and the size is the length without the FCS.
1829  * After calling ENET_QOS_GetRxFrameSize, ENET_QOS_ReadFrame() should be called to update the
1830  * receive buffers if the result is not "kStatus_ENET_QOS_RxFrameEmpty".
1831  *
 * param base ENET peripheral base address.
1832  * param handle The ENET handler structure. This is the same handler pointer used in the ENET_QOS_Init.
1833  * param length The length of the valid frame received.
1834  * param channel The Rx DMA channel.
1835 * retval kStatus_ENET_QOS_RxFrameEmpty No frame received. Should not call ENET_QOS_ReadFrame to read frame.
1836 * retval kStatus_ENET_QOS_RxFrameError Data error happens. ENET_QOS_ReadFrame should be called with NULL data
1837 * and NULL length to update the receive buffers.
1838 * retval kStatus_Success Receive a frame Successfully then the ENET_QOS_ReadFrame
1839 * should be called with the right data buffer and the captured data length input.
1840 */
1841 status_t ENET_QOS_GetRxFrameSize(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint32_t *length, uint8_t channel)
1842 {
1843 assert(handle != NULL);
1844 assert(length != NULL);
1845
1846 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
1847 enet_qos_rx_bd_struct_t *rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
1848 uint16_t index = rxBdRing->rxGenIdx;
1849 uint32_t control = rxDesc->control;
1850
1851 /* Reset the length to zero. */
1852 *length = 0;
1853
1854 if ((control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U)
1855 {
1856 return kStatus_ENET_QOS_RxFrameEmpty;
1857 }
1858 else
1859 {
1860 do
1861 {
1862 /* Application owns the buffer descriptor, get the length. */
1863 if ((control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
1864 {
1865 if ((control & ENET_QOS_RXDESCRIP_WR_ERRSUM_MASK) != 0U)
1866 {
1867 return kStatus_ENET_QOS_RxFrameError;
1868 }
1869 *length = (control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) - ENET_QOS_FCS_LEN;
1870 return kStatus_Success;
1871 }
1872
1873 index = ENET_QOS_IncreaseIndex(index, rxBdRing->rxRingLen);
1874 rxDesc = &rxBdRing->rxBdBase[index];
1875 control = rxDesc->control;
1876 } while (index != rxBdRing->rxGenIdx);
1877
1878 return kStatus_ENET_QOS_RxFrameError;
1879 }
1880 }
1881
1882 static void ENET_QOS_DropFrame(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint8_t channel)
1883 {
1884 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
1885 enet_qos_rx_bd_struct_t *rxDesc;
1886 uint16_t index = rxBdRing->rxGenIdx;
1887 bool tsAvailable = false;
1888 uint32_t buff1Addr = 0;
1889 uint32_t buff2Addr = 0;
1890
1891     /* Do not check DMA ownership here; assume there's at least one valid frame left in the BD ring */
1892 do
1893 {
1894 /* Get the control flag. */
1895 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
1896
1897 if (!handle->doubleBuffEnable)
1898 {
1899 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
1900 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
1901 handle->doubleBuffEnable);
1902 }
1903 else
1904 {
1905 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
1906 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
1907 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
1908 handle->rxintEnable, handle->doubleBuffEnable);
1909 }
1910
1911 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
1912
1913 /* Find the last buffer descriptor for the frame. */
1914 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
1915 {
1916 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_RS1V_MASK) != 0U)
1917 {
1918 if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
1919 {
1920 tsAvailable = true;
1921 }
1922 }
1923
1924 /* Reinit for the context descriptor which has been updated by DMA. */
1925 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
1926
1927 if (tsAvailable && ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U))
1928 {
1929 if (!handle->doubleBuffEnable)
1930 {
1931 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
1932 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
1933 handle->doubleBuffEnable);
1934 }
1935 else
1936 {
1937 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
1938 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
1939 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
1940 handle->rxintEnable, handle->doubleBuffEnable);
1941 }
1942 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
1943 }
1944 break;
1945 }
1946 } while (rxBdRing->rxGenIdx != index);
1947
1948 /* Always try to start receive, in case it had stopped */
1949 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = (uint32_t)(uint8_t *)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
1950 }
1951
1952 /*!
1953 * brief Reads a frame from the ENET device.
1954 * This function reads a frame from the ENET DMA descriptors.
1955 * The ENET_QOS_GetRxFrameSize should be used to get the size of the prepared data buffer.
1956  * For example, to use Rx DMA channel 0:
1957 * code
1958  *       uint32_t length;
1959  *       enet_qos_handle_t g_handle;
1960  *       enet_qos_ptp_time_t ts;
 *       status_t status;
1961  *       status = ENET_QOS_GetRxFrameSize(ENET, &g_handle, &length, 0);
1962 * if (length != 0)
1963 * {
1964 * uint8_t *data = memory allocate interface;
1965 * if (!data)
1966 * {
1967 * ENET_QOS_ReadFrame(ENET, &g_handle, NULL, 0, 0, &ts);
1968 * }
1969 * else
1970 * {
1971 * status = ENET_QOS_ReadFrame(ENET, &g_handle, data, length, 0, &ts);
1972 * }
1973 * }
1974 * else if (status == kStatus_ENET_QOS_RxFrameError)
1975 * {
1976 * ENET_QOS_ReadFrame(ENET, &g_handle, NULL, 0, 0, &ts);
1977 * }
1978 * endcode
1979 * param base ENET peripheral base address.
1980 * param handle The ENET handler structure. This is the same handler pointer used in the ENET_QOS_Init.
1981  * param data The data buffer provided by the user to store the frame, whose memory size should be at least "length".
1982  * param length The size of the data buffer, which equals the length of the received frame.
1983  * param channel The Rx DMA channel, which shall not be larger than 2.
 * param ts Pointer used to store the Rx frame timestamp when one is available; it may be NULL if not needed.
1984  * return The execution status, successful or failure.
1985 */
1986 status_t ENET_QOS_ReadFrame(ENET_QOS_Type *base,
1987 enet_qos_handle_t *handle,
1988 uint8_t *data,
1989 uint32_t length,
1990 uint8_t channel,
1991 enet_qos_ptp_time_t *ts)
1992 {
1993 assert(handle != NULL);
1994 assert(channel < handle->rxQueueUse);
1995
1996 uint32_t len = 0;
1997 uint32_t offset = 0;
1998 uint32_t control;
1999 bool isLastBuff = false;
2000 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
2001 enet_qos_rx_bd_struct_t *rxDesc;
2002 status_t result = kStatus_Fail;
2003 uint32_t buff1Addr = 0; /*!< Buffer 1 address */
2004 uint32_t buff2Addr = 0; /*!< Buffer 2 or next descriptor address */
2005
2006 bool tsAvailable = false;
2007
2008 /* For data-NULL input, only update the buffer descriptor. */
2009 if (data == NULL)
2010 {
2011 ENET_QOS_DropFrame(base, handle, channel);
2012 result = kStatus_Success;
2013 }
2014 else
2015 {
2016 while ((!isLastBuff))
2017 {
2018 /* The last buffer descriptor of a frame. */
2019 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2020 control = rxDesc->control;
2021
2022 if (!handle->doubleBuffEnable)
2023 {
2024 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2025 if (handle->rxMaintainEnable[channel])
2026 {
2027 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2028 /* Add the cache invalidate maintain. */
2029 DCACHE_InvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2030 rxBdRing->rxBuffSizeAlign);
2031 #else
2032 /* Add the cache invalidate maintain. */
2033 DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
2034 #endif
2035 }
2036 }
2037 else
2038 {
2039 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2040 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2041 if (handle->rxMaintainEnable[channel])
2042 {
2043 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2044 /* Add the cache invalidate maintain. */
2045 DCACHE_InvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2046 rxBdRing->rxBuffSizeAlign);
2047 /* Add the cache invalidate maintain. */
2048 DCACHE_InvalidateByRange(MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
2049 rxBdRing->rxBuffSizeAlign);
2050 #else
2051 /* Add the cache invalidate maintain. */
2052 DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
2053 /* Add the cache invalidate maintain. */
2054 DCACHE_InvalidateByRange(buff2Addr, rxBdRing->rxBuffSizeAlign);
2055 #endif
2056 }
2057 }
2058
2059 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2060
2061 if ((control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2062 {
2063 /* This is a valid frame. */
2064 isLastBuff = true;
2065
2066 /* Remove FCS */
2067 len = (control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) - ENET_QOS_FCS_LEN;
2068
2069 if (length == len)
2070 {
2071 /* Copy the frame to user's buffer. */
2072 len -= offset;
2073
2074 if (len > rxBdRing->rxBuffSizeAlign)
2075 {
2076 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2077 (void)memcpy((void *)&data[offset],
2078 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2079 rxBdRing->rxBuffSizeAlign);
2080 #else
2081 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, rxBdRing->rxBuffSizeAlign);
2082 #endif
2083 offset += rxBdRing->rxBuffSizeAlign;
2084 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2085 (void)memcpy((void *)&data[offset],
2086 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
2087 len - rxBdRing->rxBuffSizeAlign);
2088 #else
2089 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff2Addr,
2090 len - rxBdRing->rxBuffSizeAlign);
2091 #endif
2092 }
2093 else
2094 {
2095 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2096 (void)memcpy((void *)&data[offset],
2097 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2098 len);
2099 #else
2100 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, len);
2101 #endif
2102 }
2103
2104 result = kStatus_Success;
2105 }
2106
2107 if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
2108 {
2109 tsAvailable = true;
2110 }
2111 /* Updates the receive buffer descriptors. */
2112 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2113 handle->rxintEnable, handle->doubleBuffEnable);
2114 /* Store the rx timestamp which is in the next buffer descriptor of the last
2115 * descriptor of a frame. */
2116 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2117 control = rxDesc->control;
2118
2119 /* If tsAvailable is true, a context descriptor is expected but might not be yet
2120 * available.
2121 */
2122 if (tsAvailable)
2123 {
2124 uint8_t retryTimes = 10;
2125
2126 while (((control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U) ||
2127 ((control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) == 0U))
2128 {
2129 SDK_DelayAtLeastUs(1U, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
2130 if (0U == retryTimes--)
2131 {
2132 assert(false);
2133 }
2134 control = rxDesc->control;
2135 }
2136 }
2137
2138                 /* Reinit for the context descriptor which has been updated by DMA. */
2139 if ((control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U)
2140 {
2141 if (tsAvailable && (NULL != ts))
2142 {
2143 ENET_QOS_StoreRxFrameTime(base, handle, rxDesc, ts);
2144 }
2145
2146 if (!handle->doubleBuffEnable)
2147 {
2148 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2149 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
2150 handle->doubleBuffEnable);
2151 }
2152 else
2153 {
2154 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2155 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2156 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2157 handle->rxintEnable, handle->doubleBuffEnable);
2158 }
2159 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2160 }
2161 }
2162 else
2163 {
2164 /* Store a frame on several buffer descriptors. */
2165 isLastBuff = false;
2166 /* Length check. */
2167 if (offset >= length)
2168 {
2169 /* Updates the receive buffer descriptors. */
2170 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2171 handle->rxintEnable, handle->doubleBuffEnable);
2172 break;
2173 }
2174
2175 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2176 (void)memcpy((void *)&data[offset],
2177 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local),
2178 rxBdRing->rxBuffSizeAlign);
2179 #else
2180 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff1Addr, rxBdRing->rxBuffSizeAlign);
2181 #endif
2182
2183 offset += rxBdRing->rxBuffSizeAlign;
2184 if (buff2Addr != 0U)
2185 {
2186 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2187 (void)memcpy((void *)&data[offset],
2188 (void *)(uint8_t *)MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local),
2189 rxBdRing->rxBuffSizeAlign);
2190 #else
2191 (void)memcpy((void *)&data[offset], (void *)(uint8_t *)buff2Addr, rxBdRing->rxBuffSizeAlign);
2192 #endif
2193 offset += rxBdRing->rxBuffSizeAlign;
2194 }
2195
2196 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2197 handle->rxintEnable, handle->doubleBuffEnable);
2198 }
2199 }
2200
2201 /* Always try to start receive, in case it had stopped */
2202 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR = (uint32_t)(uint8_t *)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
2203 }
2204
2205 return result;
2206 }
2207
2208 /*!
2209 * brief Updates the buffers and the own status for a given rx descriptor.
2210  * This function is a low level functional API to update the
2211 * buffers and the own status for a given rx descriptor.
2212 *
2213 * param rxDesc The given rx descriptor.
2214 * param buffer1 The first buffer address in the descriptor.
2215 * param buffer2 The second buffer address in the descriptor.
2216 * param intEnable Interrupt enable flag.
2217 * param doubleBuffEnable The double buffer enable flag.
2218 *
2219  * note This must be called after all the ENET initialization.
2220 * And should be called when the ENET receive/transmit is required.
2221 */
2222 void ENET_QOS_UpdateRxDescriptor(
2223 enet_qos_rx_bd_struct_t *rxDesc, void *buffer1, void *buffer2, bool intEnable, bool doubleBuffEnable)
2224 {
2225 assert(rxDesc != NULL);
2226 uint32_t control = ENET_QOS_RXDESCRIP_RD_OWN_MASK | ENET_QOS_RXDESCRIP_RD_BUFF1VALID_MASK;
2227
2228 if (intEnable)
2229 {
2230 control |= ENET_QOS_RXDESCRIP_RD_IOC_MASK;
2231 }
2232
2233 if (doubleBuffEnable)
2234 {
2235 control |= ENET_QOS_RXDESCRIP_RD_BUFF2VALID_MASK;
2236 }
2237
2238 /* Update the buffer if needed. */
2239 if (buffer1 != NULL)
2240 {
2241 rxDesc->buff1Addr = (uint32_t)(uint8_t *)buffer1;
2242 }
2243 if (buffer2 != NULL)
2244 {
2245 rxDesc->buff2Addr = (uint32_t)(uint8_t *)buffer2;
2246 }
2247 else
2248 {
2249 rxDesc->buff2Addr = 0;
2250 }
2251
2252 rxDesc->reserved = 0;
2253
2254 /* Add a data barrier to be sure that the address is written before the
2255 ownership bit status. */
2256 __DMB();
2257
2258 rxDesc->control = control;
2259 }
2260
2261 /*!
2262 * brief Setup a given tx descriptor.
2263 * This function is a low level functional API to setup or prepare
2264 * a given tx descriptor.
2265 *
2266 * param txDesc The given tx descriptor.
2267 * param buffer1 The first buffer address in the descriptor.
2268  * param bytes1 The bytes in the first buffer.
2269  * param buffer2 The second buffer address in the descriptor.
2270  * param bytes2 The bytes in the second buffer.
2271 * param framelen The length of the frame to be transmitted.
2272 * param intEnable Interrupt enable flag.
2273 * param tsEnable The timestamp enable.
2274  * param flag The flag of this tx descriptor, see "enet_qos_desc_flag".
2275 * param slotNum The slot num used for AV only.
2276 *
2277  * note This must be called after all the ENET initialization.
2278 * And should be called when the ENET receive/transmit is required.
2279 * Transmit buffers are 'zero-copy' buffers, so the buffer must remain in
2280 * memory until the packet has been fully transmitted. The buffers
2281  * should be freed or requeued in the transmit interrupt IRQ handler.
2282 */
2283 void ENET_QOS_SetupTxDescriptor(enet_qos_tx_bd_struct_t *txDesc,
2284 void *buffer1,
2285 uint32_t bytes1,
2286 void *buffer2,
2287 uint32_t bytes2,
2288 uint32_t framelen,
2289 bool intEnable,
2290 bool tsEnable,
2291 enet_qos_desc_flag flag,
2292 uint8_t slotNum)
2293 {
2294 uint32_t control = ENET_QOS_TXDESCRIP_RD_BL1(bytes1) | ENET_QOS_TXDESCRIP_RD_BL2(bytes2);
2295
2296 if (tsEnable)
2297 {
2298 control |= ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
2299 }
2300 else
2301 {
2302 control &= ~ENET_QOS_TXDESCRIP_RD_TTSE_MASK;
2303 }
2304
2305 if (intEnable)
2306 {
2307 control |= ENET_QOS_TXDESCRIP_RD_IOC_MASK;
2308 }
2309 else
2310 {
2311 control &= ~ENET_QOS_TXDESCRIP_RD_IOC_MASK;
2312 }
2313
2314 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2315 buffer1 = (void *)(uint32_t *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)buffer1, kMEMORY_Local2DMA);
2316 buffer2 = (void *)(uint32_t *)MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)buffer2, kMEMORY_Local2DMA);
2317 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
2318
2319     /* Prepare the descriptor for transmit. */
2320 txDesc->buff1Addr = (uint32_t)(uint8_t *)buffer1;
2321 txDesc->buff2Addr = (uint32_t)(uint8_t *)buffer2;
2322 txDesc->buffLen = control;
2323
2324 /* Make sure all fields of descriptor are written before setting ownership */
2325 __DMB();
2326
2327 control = ENET_QOS_TXDESCRIP_RD_FL(framelen) | ENET_QOS_TXDESCRIP_RD_LDFD(flag) | ENET_QOS_TXDESCRIP_RD_OWN_MASK;
2328
2329 txDesc->controlStat = control;
2330
2331 /* Make sure the descriptor is written in memory (before MAC starts checking it) */
2332 __DSB();
2333 }
2334
2335 /*!
2336 * brief Reclaim tx descriptors.
2337 * This function is used to update the tx descriptor status and
2338 * store the tx timestamp when the 1588 feature is enabled.
2339  * This is called by the transmit interrupt IRQ handler after the
2340  * completion of a frame transmission.
2341 *
2342 * param base ENET peripheral base address.
2343 * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_QOS_Init.
2344  * param channel The tx DMA channel.
2345 *
2346 */
2347 void ENET_QOS_ReclaimTxDescriptor(ENET_QOS_Type *base, enet_qos_handle_t *handle, uint8_t channel)
2348 {
2349 enet_qos_tx_bd_ring_t *txBdRing = &handle->txBdRing[channel];
2350 enet_qos_tx_bd_struct_t *txDesc = &txBdRing->txBdBase[txBdRing->txConsumIdx];
2351 enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
2352 enet_qos_frame_info_t *txDirty = NULL;
2353 uint32_t control, primask;
2354
2355 control = txDesc->controlStat;
2356
2357 /* Need to update the first index for transmit buffer free. */
2358 while ((txBdRing->txDescUsed > 0U) && (0U == (control & ENET_QOS_TXDESCRIP_RD_OWN_MASK)))
2359 {
2360 if ((control & ENET_QOS_TXDESCRIP_RD_LD_MASK) != 0U)
2361 {
2362 if (ENET_QOS_TxDirtyRingAvailable(txDirtyRing))
2363 {
2364 txDirty = &txDirtyRing->txDirtyBase[txBdRing->txConsumIdx];
2365 txDirtyRing->txGenIdx = ENET_QOS_IncreaseIndex(txDirtyRing->txGenIdx, txDirtyRing->txRingLen);
2366 if (txDirtyRing->txGenIdx == txDirtyRing->txConsumIdx)
2367 {
2368 txDirtyRing->isFull = true;
2369 }
2370
2371 if ((control & ENET_QOS_TXDESCRIP_WB_TTSS_MASK) != 0U)
2372 {
2373 enet_qos_ptp_time_t *ts = &txDirty->timeStamp;
2374 uint32_t nanosecond;
2375 /* Get transmit time stamp second. */
2376 nanosecond = txDesc->buff1Addr;
2377 txDirty->isTsAvail = true;
2378 if (0U == (base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK))
2379 {
2380 /* Binary rollover, 0.465ns accuracy. */
2381 nanosecond = (nanosecond * 465U) / 1000U;
2382 }
2383 ts->second = txDesc->buff2Addr;
2384 ts->nanosecond = nanosecond;
2385 }
2386 else
2387 {
2388 txDirty->isTsAvail = false;
2389 }
2390 }
2391 }
2392
2393 /* For tx buffer free or requeue for each descriptor.
2394 * The tx interrupt callback should free/requeue the tx buffer. */
2395 if (handle->callback != NULL)
2396 {
2397 handle->callback(base, handle, kENET_QOS_TxIntEvent, channel, handle->userData);
2398 }
2399
2400 primask = DisableGlobalIRQ();
2401 txBdRing->txDescUsed--;
2402 EnableGlobalIRQ(primask);
2403
2404 /* Update the txConsumIdx/txDesc. */
2405 txBdRing->txConsumIdx = ENET_QOS_IncreaseIndex(txBdRing->txConsumIdx, txBdRing->txRingLen);
2406 txDesc = &txBdRing->txBdBase[txBdRing->txConsumIdx];
2407 control = txDesc->controlStat;
2408 }
2409 }
2410
2411 /*!
2412 * brief Transmits an ENET frame.
2413 * note The CRC is automatically appended to the data. Input the data
2414 * to send without the CRC.
2415 *
2416 * param base ENET peripheral base address.
2417 * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_QOS_Init.
2418  * param data The data buffer provided by the user to be sent.
2419  * param length The length of the data to be sent.
2420  * param channel Channel to send the frame, same as the queue index.
2421  * param isNeedTs True means to save the timestamp.
2422  * param context Pointer to a user context to be kept in the tx dirty frame information.
2423 * retval kStatus_Success Send frame succeed.
2424 * retval kStatus_ENET_QOS_TxFrameBusy Transmit buffer descriptor is busy under transmission.
2425  *         The transmit busy condition occurs when the data send rate is over the MAC capacity.
2426  *         A waiting mechanism is recommended to be added after each call that returns
2427  *         kStatus_ENET_QOS_TxFrameBusy.
2428 */
2429 status_t ENET_QOS_SendFrame(ENET_QOS_Type *base,
2430 enet_qos_handle_t *handle,
2431 uint8_t *data,
2432 uint32_t length,
2433 uint8_t channel,
2434 bool isNeedTs,
2435 void *context)
2436 {
2437 assert(handle != NULL);
2438 assert(data != NULL);
2439 assert(channel < handle->txQueueUse);
2440
2441 enet_qos_tx_bd_ring_t *txBdRing;
2442 enet_qos_tx_bd_struct_t *txDesc;
2443 enet_qos_tx_dirty_ring_t *txDirtyRing;
2444 enet_qos_frame_info_t *txDirty;
2445 uint32_t primask;
2446
2447 if (length > 2U * ENET_QOS_TXDESCRIP_RD_BL1_MASK)
2448 {
2449 return kStatus_ENET_QOS_TxFrameOverLen;
2450 }
2451
2452 /* Check if the DMA owns the descriptor. */
2453 txBdRing = (enet_qos_tx_bd_ring_t *)&handle->txBdRing[channel];
2454 txDesc = &txBdRing->txBdBase[txBdRing->txGenIdx];
2455 if (txBdRing->txRingLen == txBdRing->txDescUsed)
2456 {
2457 return kStatus_ENET_QOS_TxFrameBusy;
2458 }
2459
2460 txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
2461 txDirty = &txDirtyRing->txDirtyBase[txBdRing->txGenIdx];
2462 txDirty->context = context;
2463
2464 /* Fill the descriptor. */
2465 if (length <= ENET_QOS_TXDESCRIP_RD_BL1_MASK)
2466 {
2467 ENET_QOS_SetupTxDescriptor(txDesc, data, length, NULL, 0, length, true, isNeedTs, kENET_QOS_FirstLastFlag, 0);
2468 }
2469 else
2470 {
2471 ENET_QOS_SetupTxDescriptor(txDesc, data, ENET_QOS_TXDESCRIP_RD_BL1_MASK, &data[ENET_QOS_TXDESCRIP_RD_BL1_MASK],
2472 (length - ENET_QOS_TXDESCRIP_RD_BL1_MASK), length, true, isNeedTs,
2473 kENET_QOS_FirstLastFlag, 0);
2474 }
2475
2476 /* Increase the index. */
2477 txBdRing->txGenIdx = ENET_QOS_IncreaseIndex(txBdRing->txGenIdx, txBdRing->txRingLen);
2478 /* Disable interrupt first and then enable interrupt to avoid the race condition. */
2479 primask = DisableGlobalIRQ();
2480 txBdRing->txDescUsed++;
2481 EnableGlobalIRQ(primask);
2482
2483 /* Update the transmit tail address. */
2484 txDesc = &txBdRing->txBdBase[txBdRing->txGenIdx];
2485 if (txBdRing->txGenIdx == 0U)
2486 {
2487 txDesc = &txBdRing->txBdBase[txBdRing->txRingLen];
2488 }
2489 base->DMA_CH[channel].DMA_CHX_TXDESC_TAIL_PTR = (uint32_t)txDesc & ~ENET_QOS_ADDR_ALIGNMENT;
2490
2491 return kStatus_Success;
2492 }
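
/*
 * Usage sketch (illustrative, not part of the driver): transmit one frame on channel 0 and retry
 * while the descriptor ring is full, as recommended above for kStatus_ENET_QOS_TxFrameBusy.
 * txBuffer/txLength are the application's frame buffer and length; because transmission is
 * zero-copy, txBuffer must stay valid until the Tx interrupt/callback reports completion.
 * code
 *    status_t status;
 *
 *    do
 *    {
 *        status = ENET_QOS_SendFrame(EXAMPLE_ENET_QOS_BASE, &g_handle, txBuffer, txLength, 0, false, NULL);
 *    } while (status == kStatus_ENET_QOS_TxFrameBusy);
 * endcode
 */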
2493
2494 /*!
2495 * brief Gets the sent frame.
2496 *
2497 * This function is used to get the sent frame for timestamp and buffer clean operation.
2498 *
2499  * param handle The ENET handler pointer. This is the same handler pointer used in
2500 * ENET_QOS_Init.
2501 * param txFrame Input parameter, pointer to enet_qos_frame_info_t for saving read out frame information.
2502 * param channel Read out frame from specified channel.
2503 */
2504 void ENET_QOS_GetTxFrame(enet_qos_handle_t *handle, enet_qos_frame_info_t *txFrame, uint8_t channel)
2505 {
2506 assert(handle != NULL);
2507 assert(channel < handle->txQueueUse);
2508
2509 enet_qos_tx_dirty_ring_t *txDirtyRing = (enet_qos_tx_dirty_ring_t *)&handle->txDirtyRing[channel];
2510 enet_qos_frame_info_t *txDirty = &txDirtyRing->txDirtyBase[txDirtyRing->txConsumIdx];
2511
2512 (void)memcpy(txFrame, txDirty, sizeof(enet_qos_frame_info_t));
2513
2514 txDirtyRing->isFull = false;
2515 txDirtyRing->txConsumIdx = ENET_QOS_IncreaseIndex(txDirtyRing->txConsumIdx, txDirtyRing->txRingLen);
2516 }
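
/*
 * Usage sketch (illustrative, not part of the driver): pick up the transmitted-frame record from
 * the Tx event callback to read the IEEE 1588 timestamp and release or requeue the buffer. The
 * callback arguments mirror the call made from ENET_QOS_ReclaimTxDescriptor() below; the event
 * enumeration type name used here (enet_qos_event_t) is an assumption.
 * code
 *    static void EXAMPLE_EnetCallback(ENET_QOS_Type *base, enet_qos_handle_t *handle,
 *                                     enet_qos_event_t event, uint8_t channel, void *userData)
 *    {
 *        if (event == kENET_QOS_TxIntEvent)
 *        {
 *            enet_qos_frame_info_t frameInfo;
 *
 *            ENET_QOS_GetTxFrame(handle, &frameInfo, channel);
 *            if (frameInfo.isTsAvail)
 *            {
 *                // ... consume frameInfo.timeStamp ...
 *            }
 *            // ... free or requeue the buffer tracked through frameInfo.context ...
 *        }
 *    }
 * endcode
 */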
2517
2518 static inline void ENET_QOS_GetRxFrameErr(enet_qos_rx_bd_struct_t *rxDesc, enet_qos_rx_frame_error_t *rxFrameError)
2519 {
2520 uint32_t rdes2 = rxDesc->buff2Addr;
2521 uint32_t rdes3 = rxDesc->control;
2522
2523 (void)memset(rxFrameError, 0, sizeof(enet_qos_rx_frame_error_t));
2524
2525 if ((rdes2 & ENET_QOS_RXDESCRIP_WR_SA_FAILURE_MASK) != 0U)
2526 {
2527 rxFrameError->rxSrcAddrFilterErr = true;
2528 }
2529 if ((rdes2 & ENET_QOS_RXDESCRIP_WR_DA_FAILURE_MASK) != 0U)
2530 {
2531 rxFrameError->rxDstAddrFilterErr = true;
2532 }
2533 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_DE_MASK) != 0U)
2534 {
2535 rxFrameError->rxDstAddrFilterErr = true;
2536 }
2537 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_RE_MASK) != 0U)
2538 {
2539 rxFrameError->rxReceiveErr = true;
2540 }
2541 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_OE_MASK) != 0U)
2542 {
2543 rxFrameError->rxOverFlowErr = true;
2544 }
2545 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_RWT_MASK) != 0U)
2546 {
2547 rxFrameError->rxWatchDogErr = true;
2548 }
2549 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_GP_MASK) != 0U)
2550 {
2551 rxFrameError->rxGaintPacketErr = true;
2552 }
2553 if ((rdes3 & ENET_QOS_RXDESCRIP_WR_CRC_MASK) != 0U)
2554 {
2555 rxFrameError->rxCrcErr = true;
2556 }
2557 }
2558
2559 /*!
2560 * brief Receives one frame in specified BD ring with zero copy.
2561 *
2562  * This function uses the user-defined allocate and free callbacks. Every time the application gets one frame through
2563  * this function, the driver allocates new buffers for the BDs whose buffers have been taken by the application.
2564  * note This function drops the current frame and updates the related BDs as available for DMA if allocating new
2565  * buffers fails. The application must provide a memory pool with at least BD number + 1 buffers (+2 if double buffer
2566  * is enabled) to make this function work normally. If this function is called in the Rx interrupt handler, be aware
2567  * that it makes the Rx BD ready either by allocating a new buffer (normal case) or by updating the current BD (out of
2568  * memory). If new Rx frames keep arriving, the Rx interrupt is triggered continuously; the application needs to
2569  * disable the Rx interrupt according to its specific design in this case.
2570 *
2571 * param base ENET peripheral base address.
2572  * param handle The ENET handler pointer. This is the same handler pointer used in the ENET_QOS_Init.
2573  * param rxFrame The received frame information structure provided by the user.
2574  * param channel The Rx DMA channel (ring index).
2575 * retval kStatus_Success Succeed to get one frame and allocate new memory for Rx buffer.
2576 * retval kStatus_ENET_QOS_RxFrameEmpty There's no Rx frame in the BD.
2577 * retval kStatus_ENET_QOS_RxFrameError There's issue in this receiving.
2578 * retval kStatus_ENET_QOS_RxFrameDrop There's no new buffer memory for BD, drop this frame.
2579 */
2580 status_t ENET_QOS_GetRxFrame(ENET_QOS_Type *base,
2581 enet_qos_handle_t *handle,
2582 enet_qos_rx_frame_struct_t *rxFrame,
2583 uint8_t channel)
2584 {
2585 assert(handle != NULL);
2586 assert(channel < handle->rxQueueUse);
2587
2588 enet_qos_rx_bd_ring_t *rxBdRing = (enet_qos_rx_bd_ring_t *)&handle->rxBdRing[channel];
2589 enet_qos_rx_bd_struct_t *rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2590 uint16_t index = rxBdRing->rxGenIdx;
2591 status_t result = kStatus_Success;
2592 uint32_t buff1Addr = 0;
2593 uint32_t buff2Addr = 0;
2594 uint16_t buff1Len = 0;
2595 uint16_t buff2Len = 0;
2596 uint16_t offset = 0;
2597 void *newBuff1 = NULL;
2598 void *newBuff2 = NULL;
2599 bool isDrop = false;
2600 bool isLastBuff = false;
2601 bool tsAvailable = false;
2602
2603 /* Check the frame status. */
2604 do
2605 {
2606 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U)
2607 {
2608 result = kStatus_ENET_QOS_RxFrameEmpty;
2609 break;
2610 }
2611
2612 /* Check timestamp and error. */
2613 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2614 {
2615 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_RS1V_MASK) != 0U)
2616 {
2617 if ((rxDesc->reserved & ENET_QOS_RXDESCRIP_WR_PTPTSA_MASK) != 0U)
2618 {
2619 /* Context descriptor is expected but might not be yet available. */
2620 uint8_t retryTimes = 10;
2621
2622 while (((rxDesc->control & ENET_QOS_RXDESCRIP_WR_OWN_MASK) != 0U) ||
2623 ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) == 0U))
2624 {
2625                         /* Timestamp value is not corrupted. */
2626 if ((rxDesc->buff1Addr != 0xFFFFFFFFU) && (rxDesc->buff2Addr != 0xFFFFFFFFU))
2627 {
2628 break;
2629 }
2630 if (retryTimes-- == 0U)
2631 {
2632 break;
2633 }
2634 }
2635
2636 if (retryTimes != 0U)
2637 {
2638 tsAvailable = true;
2639 }
2640 else
2641 {
2642 result = kStatus_ENET_QOS_RxFrameEmpty;
2643 break;
2644 }
2645 }
2646 }
2647
2648 /* Get the frame error if there is. */
2649 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_ERRSUM_MASK) != 0U)
2650 {
2651 ENET_QOS_GetRxFrameErr(rxDesc, &rxFrame->rxFrameError);
2652 result = kStatus_ENET_QOS_RxFrameError;
2653 }
2654 else if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK) == 0U)
2655 {
2656 result = kStatus_ENET_QOS_RxFrameEmpty;
2657 }
2658 else
2659 {
2660 /* Intentional empty */
2661 }
2662 break;
2663 }
2664
2665 index = ENET_QOS_IncreaseIndex(index, rxBdRing->rxRingLen);
2666 if (index == rxBdRing->rxGenIdx)
2667 {
2668 result = kStatus_ENET_QOS_RxFrameEmpty;
2669 break;
2670 }
2671 rxDesc = &rxBdRing->rxBdBase[index];
2672 } while (index != rxBdRing->rxGenIdx);
2673
2674 /* Drop the error frame and return error. */
2675 if (result != kStatus_Success)
2676 {
2677 if (result == kStatus_ENET_QOS_RxFrameError)
2678 {
2679 ENET_QOS_DropFrame(base, handle, channel);
2680 }
2681 return result;
2682 }
2683
2684 /* Get the valid frame */
2685 index = 0;
2686 do
2687 {
2688 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2689
2690         /* Calculate the buffer and frame length. */
2691 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_LD_MASK) != 0U)
2692 {
2693 isLastBuff = true;
2694 rxFrame->totLen = (uint16_t)(rxDesc->control & ENET_QOS_RXDESCRIP_WR_PACKETLEN_MASK);
2695
2696 if (rxFrame->totLen - offset > (uint16_t)rxBdRing->rxBuffSizeAlign)
2697 {
2698 buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2699 if (handle->doubleBuffEnable)
2700 {
2701 buff2Len = rxFrame->totLen - offset - (uint16_t)rxBdRing->rxBuffSizeAlign - ENET_QOS_FCS_LEN;
2702 }
2703 }
2704 else
2705 {
2706 buff1Len = rxFrame->totLen - offset - ENET_QOS_FCS_LEN;
2707 }
2708 rxFrame->totLen -= ENET_QOS_FCS_LEN;
2709 }
2710 else
2711 {
2712 if (!handle->doubleBuffEnable)
2713 {
2714 buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2715 offset += buff1Len;
2716 }
2717 else
2718 {
2719 buff1Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2720 buff2Len = (uint16_t)rxBdRing->rxBuffSizeAlign;
2721 offset += buff1Len + buff2Len;
2722 }
2723 }
2724
2725 /* Allocate new buffer to replace the buffer taken by application */
2726 newBuff1 = handle->rxBuffAlloc(base, handle->userData, channel);
2727 if (newBuff1 == NULL)
2728 {
2729 isDrop = true;
2730 }
2731 else if (handle->doubleBuffEnable && (buff2Len != 0U))
2732 {
2733 newBuff2 = handle->rxBuffAlloc(base, handle->userData, channel);
2734 if (newBuff2 == NULL)
2735 {
2736 handle->rxBuffFree(base, newBuff1, handle->userData, channel);
2737 isDrop = true;
2738 }
2739 }
2740 else
2741 {
2742 /* Intentional empty */
2743 }
2744
2745 if (!isDrop)
2746 {
2747 /* Get the frame data information into Rx frame structure. */
2748 if (!handle->doubleBuffEnable)
2749 {
2750 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2751 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2752 buff1Addr = MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local);
2753 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
2754 if (handle->rxMaintainEnable[channel])
2755 {
2756 DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
2757 }
2758 rxFrame->rxBuffArray[index].buffer = (void *)(uint32_t *)buff1Addr;
2759 rxFrame->rxBuffArray[index].length = buff1Len;
2760 index++;
2761 }
2762 else
2763 {
2764 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2765 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2766 buff1Addr = MEMORY_ConvertMemoryMapAddress(buff1Addr, kMEMORY_DMA2Local);
2767 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
2768 if (handle->rxMaintainEnable[channel])
2769 {
2770 DCACHE_InvalidateByRange(buff1Addr, rxBdRing->rxBuffSizeAlign);
2771 }
2772 rxFrame->rxBuffArray[index].buffer = (void *)(uint32_t *)buff1Addr;
2773 rxFrame->rxBuffArray[index].length = buff1Len;
2774 index++;
2775
2776                 /* If there's no data in buffer2, do not add it to rxFrame */
2777 if (buff2Len != 0U)
2778 {
2779 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2780 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2781 buff2Addr = MEMORY_ConvertMemoryMapAddress(buff2Addr, kMEMORY_DMA2Local);
2782 #endif /* FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET */
2783 if (handle->rxMaintainEnable[channel])
2784 {
2785 DCACHE_InvalidateByRange(buff2Addr, rxBdRing->rxBuffSizeAlign);
2786 }
2787 rxFrame->rxBuffArray[index].buffer = (void *)(uint32_t *)buff2Addr;
2788 rxFrame->rxBuffArray[index].length = buff2Len;
2789 index++;
2790 }
2791 }
2792
2793 /* Give new buffer from application to BD */
2794 if (!handle->doubleBuffEnable)
2795 {
2796 if (handle->rxMaintainEnable[channel])
2797 {
2798 DCACHE_InvalidateByRange((uint32_t)(uint32_t *)newBuff1, rxBdRing->rxBuffSizeAlign);
2799 }
2800 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2801 buff1Addr = MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)newBuff1, kMEMORY_Local2DMA);
2802 #else
2803 buff1Addr = (uint32_t)(uint32_t *)newBuff1;
2804 #endif
2805 handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx] = buff1Addr;
2806 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint32_t *)buff1Addr, NULL, handle->rxintEnable,
2807 handle->doubleBuffEnable);
2808 }
2809 else
2810 {
2811 if (handle->rxMaintainEnable[channel])
2812 {
2813 DCACHE_InvalidateByRange((uint32_t)(uint32_t *)newBuff1, rxBdRing->rxBuffSizeAlign);
2814 }
2815 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2816 buff1Addr = MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)newBuff1, kMEMORY_Local2DMA);
2817 #else
2818 buff1Addr = (uint32_t)(uint32_t *)newBuff1;
2819 #endif
2820 handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx] = buff1Addr;
2821
2822 if (buff2Len != 0U)
2823 {
2824 if (handle->rxMaintainEnable[channel])
2825 {
2826 DCACHE_InvalidateByRange((uint32_t)(uint32_t *)newBuff2, rxBdRing->rxBuffSizeAlign);
2827 }
2828 #if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
2829 buff2Addr = MEMORY_ConvertMemoryMapAddress((uint32_t)(uint32_t *)newBuff2, kMEMORY_Local2DMA);
2830 #else
2831 buff2Addr = (uint32_t)(uint32_t *)newBuff2;
2832 #endif
2833 handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U] = buff2Addr;
2834 }
2835 else
2836 {
2837 /* If there's no data in buffer2, keep it */
2838 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2839 }
2840
2841 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint32_t *)buff1Addr, (void *)(uint32_t *)buff2Addr,
2842 handle->rxintEnable, handle->doubleBuffEnable);
2843 }
2844 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2845
2846 /* Update context BD if there is */
2847 if (isLastBuff && tsAvailable)
2848 {
2849 rxDesc = &rxBdRing->rxBdBase[rxBdRing->rxGenIdx];
2850 if ((rxDesc->control & ENET_QOS_RXDESCRIP_WR_CTXT_MASK) != 0U)
2851 {
2852 ENET_QOS_StoreRxFrameTime(base, handle, rxDesc, &rxFrame->rxAttribute.timestamp);
2853 rxFrame->rxAttribute.isTsAvail = true;
2854
2855 if (!handle->doubleBuffEnable)
2856 {
2857 buff1Addr = handle->rxBufferStartAddr[channel][rxBdRing->rxGenIdx];
2858 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, NULL, handle->rxintEnable,
2859 handle->doubleBuffEnable);
2860 }
2861 else
2862 {
2863 buff1Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx];
2864 buff2Addr = handle->rxBufferStartAddr[channel][2U * rxBdRing->rxGenIdx + 1U];
2865 ENET_QOS_UpdateRxDescriptor(rxDesc, (void *)(uint8_t *)buff1Addr, (void *)(uint8_t *)buff2Addr,
2866 handle->rxintEnable, handle->doubleBuffEnable);
2867 }
2868 rxBdRing->rxGenIdx = ENET_QOS_IncreaseIndex(rxBdRing->rxGenIdx, rxBdRing->rxRingLen);
2869 }
2870 }
2871 /* Always try to start receive, in case it had stopped */
2872 base->DMA_CH[channel].DMA_CHX_RXDESC_TAIL_PTR =
2873 (uint32_t)(uint8_t *)&rxBdRing->rxBdBase[rxBdRing->rxRingLen];
2874 }
2875 else
2876 {
2877 /* Drop frame if there's no new buffer memory */
2878
2879 /* Free the incomplete frame buffers. */
2880 while (index-- != 0U)
2881 {
2882 handle->rxBuffFree(base, &rxFrame->rxBuffArray[index].buffer, handle->userData, channel);
2883 }
2884
2885 /* Update all left BDs of this frame from current index. */
2886 ENET_QOS_DropFrame(base, handle, channel);
2887
2888 result = kStatus_ENET_QOS_RxFrameDrop;
2889 break;
2890 }
2891 } while (!isLastBuff);
2892
2893 return result;
2894 }
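
/*
 * Usage sketch (illustrative, not part of the driver): zero-copy receive on channel 0. rxBuffArray
 * must point to an application-provided array of buffer descriptors (element type declared in
 * fsl_enet_qos.h) large enough for one frame; g_rxBuffDescrip and EXAMPLE_ENET_QOS_BASE are
 * placeholder names. The rxBuffAlloc/rxBuffFree callbacks registered in the configuration supply
 * the buffer pool used to refill the BD ring.
 * code
 *    enet_qos_rx_frame_struct_t rxFrame;
 *    status_t status;
 *
 *    rxFrame.rxBuffArray = &g_rxBuffDescrip[0];
 *    status = ENET_QOS_GetRxFrame(EXAMPLE_ENET_QOS_BASE, &g_handle, &rxFrame, 0);
 *    if (status == kStatus_Success)
 *    {
 *        // ... process rxFrame.totLen bytes spread over rxFrame.rxBuffArray[] ...
 *        // ... return the taken buffers to the pool when done ...
 *    }
 * endcode
 */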
2895
2896 /*!
2897  * brief Gets the current ENET time from the PTP 1588 timer without disabling IRQs.
2898 *
2899 * param base ENET peripheral base address.
2900 * param second The PTP 1588 system timer second.
2901 * param nanosecond The PTP 1588 system timer nanosecond.
2902  *        The unit of the nanosecond is 1 ns, so the nanosecond value is the actual nanosecond count.
2903 */
2904 void ENET_QOS_Ptp1588GetTimerNoIRQDisable(ENET_QOS_Type *base, uint64_t *second, uint32_t *nanosecond)
2905 {
2906 assert(second != NULL);
2907 assert(nanosecond != NULL);
2908
2909 uint32_t high_sec[2];
2910 uint32_t sec[2];
2911
2912 /* Get the current PTP time. */
2913 /* Since register reads are not atomic, we need to check for wraps during the read */
2914 high_sec[1] = base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS & ENET_QOS_MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS_TSHWR_MASK;
2915
2916 do
2917 {
2918 high_sec[0] = high_sec[1];
2919
2920 sec[1] = base->MAC_SYSTEM_TIME_SECONDS;
2921
2922 do
2923 {
2924 sec[0] = sec[1];
2925 *nanosecond = base->MAC_SYSTEM_TIME_NANOSECONDS & ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_TSSS_MASK;
2926 sec[1] = base->MAC_SYSTEM_TIME_SECONDS;
2927 } while (sec[1] != sec[0]);
2928
2929 high_sec[1] =
2930 base->MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS & ENET_QOS_MAC_SYSTEM_TIME_HIGHER_WORD_SECONDS_TSHWR_MASK;
2931 } while (high_sec[1] != high_sec[0]);
2932
2933 *second = ((uint64_t)high_sec[1] << 32U) | sec[1];
2934
2935 if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) == 0U)
2936 {
2937 /* Binary rollover, the unit of the increment is ~ 0.465 ns. */
2938 *nanosecond = (*nanosecond * 465U) / 1000U;
2939 }
2940 }
2941
2942 /*!
2943  * brief Gets the current ENET time from the PTP 1588 timer, getting a more accurate value
2944  * with IRQs disabled while reading the timer.
2945 *
2946 * param base ENET peripheral base address.
2947 * param second The PTP 1588 system timer second.
2948 * param nanosecond The PTP 1588 system timer nanosecond.
2949  *        The unit of the nanosecond is 1 ns, so the nanosecond value is the actual nanosecond count.
2950 */
2951 void ENET_QOS_Ptp1588GetTimer(ENET_QOS_Type *base, uint64_t *second, uint32_t *nanosecond)
2952 {
2953 uint32_t primask;
2954
2955 /* Disables the interrupt. */
2956 primask = DisableGlobalIRQ();
2957
2958 ENET_QOS_Ptp1588GetTimerNoIRQDisable(base, second, nanosecond);
2959
2960 /* Enables the interrupt. */
2961 EnableGlobalIRQ(primask);
2962 }
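
/*
 * Usage sketch (illustrative, not part of the driver): sample the PTP system time twice and
 * compute the elapsed nanoseconds, e.g. for coarse software profiling.
 * code
 *    uint64_t sec1, sec2, elapsedNs;
 *    uint32_t ns1, ns2;
 *
 *    ENET_QOS_Ptp1588GetTimer(EXAMPLE_ENET_QOS_BASE, &sec1, &ns1);
 *    // ... work to be measured ...
 *    ENET_QOS_Ptp1588GetTimer(EXAMPLE_ENET_QOS_BASE, &sec2, &ns2);
 *    elapsedNs = (sec2 - sec1) * (uint64_t)ENET_QOS_NANOSECS_ONESECOND + (uint64_t)ns2 - (uint64_t)ns1;
 * endcode
 */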
2963
2964 /*!
2965  * brief Corrects the ENET PTP 1588 timer using the coarse method.
2966 *
2967 * param base ENET peripheral base address.
2968 * param operation The system time operation, refer to "enet_qos_systime_op"
2969 * param second The correction second.
2970 * param nanosecond The correction nanosecond.
2971 */
2972 status_t ENET_QOS_Ptp1588CorrectTimerInCoarse(ENET_QOS_Type *base,
2973 enet_qos_systime_op operation,
2974 uint32_t second,
2975 uint32_t nanosecond)
2976 {
2977 uint32_t corrSecond = second;
2978 uint32_t corrNanosecond;
2979 status_t result = kStatus_Success;
2980
2981 /* Set the system timer. */
2982 if ((base->MAC_TIMESTAMP_CONTROL & ENET_QOS_MAC_TIMESTAMP_CONTROL_TSCTRLSSR_MASK) != 0U)
2983 {
2984 if (operation == kENET_QOS_SystimeSubtract)
2985 {
2986 /* Set with the complement of the sub-second. */
2987 corrSecond = ENET_QOS_MAC_SYSTEM_TIME_SECONDS_UPDATE_TSS_MASK - (second - 1U);
2988 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_MASK |
2989 ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(ENET_QOS_NANOSECS_ONESECOND - nanosecond);
2990 }
2991 else
2992 {
2993 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(nanosecond);
2994 }
2995 }
2996 else
2997 {
2998 nanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS_MASK / ENET_QOS_NANOSECS_ONESECOND * nanosecond;
2999 if (operation == kENET_QOS_SystimeSubtract)
3000 {
3001 /* Set with the complement of the sub-second. */
3002 corrSecond = ENET_QOS_MAC_SYSTEM_TIME_SECONDS_UPDATE_TSS_MASK - (second - 1U);
3003 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_MASK |
3004 ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(
3005 ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS_MASK + 1U - nanosecond);
3006 }
3007 else
3008 {
3009 corrNanosecond = ENET_QOS_MAC_SYSTEM_TIME_NANOSECONDS_UPDATE_TSSS(nanosecond);
3010 }
3011 }
3012
3013 base->MAC_SYSTEM_TIME_SECONDS_UPDATE = corrSecond;
3014 base->MAC_SYSTEM_TIME_NANOSECONDS_UPDATE = corrNanosecond;
3015
3016 /* Update the timer. */
3017 base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSUPDT_MASK;
3018
3019 /* Wait for update finish */
3020 result = ENET_QOS_PollStatusFlag(&(base->MAC_TIMESTAMP_CONTROL), ENET_QOS_MAC_TIMESTAMP_CONTROL_TSUPDT_MASK, 0U);
3021
3022 return result;
3023 }
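
/*
 * Usage sketch (illustrative, not part of the driver): apply a one-shot coarse offset correction,
 * here stepping the clock back by 1.5 ms. kENET_QOS_SystimeSubtract is the operation handled in
 * the code above; the matching add operation from the same enet_qos_systime_op enumeration is
 * used for positive steps.
 * code
 *    status_t status;
 *
 *    status = ENET_QOS_Ptp1588CorrectTimerInCoarse(EXAMPLE_ENET_QOS_BASE, kENET_QOS_SystimeSubtract,
 *                                                  0U, 1500000U); // 0 s, 1500000 ns
 *    if (status != kStatus_Success)
 *    {
 *        // ... the timer update did not complete in time ...
 *    }
 * endcode
 */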
3024
3025 /*!
3026  * brief Corrects the ENET PTP 1588 timer using the fine method.
3027 *
3028 *
3029 * param base ENET peripheral base address.
3030 * param addend The addend value to be set in the fine method
3031  * note Refer to the chapter "System time correction" and
3032  * see the description of the "fine correction method".
3033 */
3034 status_t ENET_QOS_Ptp1588CorrectTimerInFine(ENET_QOS_Type *base, uint32_t addend)
3035 {
3036 status_t result = kStatus_Success;
3037
3038 base->MAC_TIMESTAMP_ADDEND = addend;
3039 base->MAC_TIMESTAMP_CONTROL |= ENET_QOS_MAC_TIMESTAMP_CONTROL_TSADDREG_MASK;
3040
3041 result = ENET_QOS_PollStatusFlag(&(base->MAC_TIMESTAMP_CONTROL), ENET_QOS_MAC_TIMESTAMP_CONTROL_TSADDREG_MASK, 0U);
3042
3043 return result;
3044 }
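
/*
 * Sketch of the fine-correction arithmetic (assumptions noted inline, not part of the driver):
 * the addend register scales the PTP input clock so that the 32-bit accumulator overflows at the
 * required systime rate, and drift is trimmed by scaling the current addend. ptpClk_Hz and ppb
 * are placeholder inputs; targeting ENET_QOS_SYSTIME_REQUIRED_CLK_MHZ is an assumption based on
 * the constant defined in this driver.
 * code
 *    // Nominal addend: addend = 2^32 * (required systime frequency) / ptpClk_Hz
 *    uint32_t addend = (uint32_t)((0x100000000ULL * ENET_QOS_SYSTIME_REQUIRED_CLK_MHZ * 1000000ULL) / ptpClk_Hz);
 *
 *    // Servo trim: if the clock is measured to run 'ppb' parts-per-billion fast, reduce the
 *    // addend proportionally (sign handling is application-defined).
 *    addend = (uint32_t)(((uint64_t)addend * (1000000000ULL - (uint64_t)ppb)) / 1000000000ULL);
 *
 *    (void)ENET_QOS_Ptp1588CorrectTimerInFine(EXAMPLE_ENET_QOS_BASE, addend);
 * endcode
 */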
3045
3046 /*!
3047  * @brief Sets the ENET QOS PTP 1588 PPS target time registers.
3048 *
3049 * param base ENET QOS peripheral base address.
3050 * param instance The ENET QOS PTP PPS instance.
3051 * param seconds The target seconds.
3052 * param nanoseconds The target nanoseconds.
3053 */
3054 status_t ENET_QOS_Ptp1588PpsSetTrgtTime(ENET_QOS_Type *base,
3055 enet_qos_ptp_pps_instance_t instance,
3056 uint32_t seconds,
3057 uint32_t nanoseconds)
3058 {
3059 uint32_t *mac_pps_trgt_ns;
3060 uint32_t *mac_pps_trgt_s;
3061
3062 mac_pps_trgt_ns = (uint32_t *)((uint32_t)&base->MAC_PPS0_TARGET_TIME_NANOSECONDS + 0x10U * (uint32_t)instance);
3063 mac_pps_trgt_s = (uint32_t *)((uint32_t)&base->MAC_PPS0_TARGET_TIME_SECONDS + 0x10U * (uint32_t)instance);
3064
3065 if ((*mac_pps_trgt_ns & ENET_QOS_MAC_PPS0_TARGET_TIME_NANOSECONDS_TRGTBUSY0_MASK) != 0U)
3066 {
3067 return kStatus_ENET_QOS_TrgtBusy;
3068 }
3069
3070 *mac_pps_trgt_ns = ENET_QOS_MAC_PPS0_TARGET_TIME_NANOSECONDS_TTSL0(nanoseconds);
3071 *mac_pps_trgt_s = ENET_QOS_MAC_PPS0_TARGET_TIME_SECONDS_TSTRH0(seconds);
3072
3073 return kStatus_Success;
3074 }
3075
3076 static status_t ENET_QOS_EstReadWriteWord(
3077 ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr, uint8_t read, uint8_t dbgm)
3078 {
3079 uint32_t ctrl;
3080 int retry = 10;
3081
3082 ctrl = ENET_QOS_MTL_EST_GCL_CONTROL_ADDR(addr) | ENET_QOS_MTL_EST_GCL_CONTROL_SRWO(1) |
3083 ENET_QOS_MTL_EST_GCL_CONTROL_DBGM(dbgm) | ENET_QOS_MTL_EST_GCL_CONTROL_GCRR(gcrr);
3084
3085 if (read != 0U)
3086 {
3087 ctrl |= ENET_QOS_MTL_EST_GCL_CONTROL_R1W0(1);
3088 }
3089 else
3090 {
3091 base->MTL_EST_GCL_DATA = *data;
3092 }
3093
3094 base->MTL_EST_GCL_CONTROL = ctrl;
3095
3096 while ((base->MTL_EST_GCL_CONTROL & ENET_QOS_MTL_EST_GCL_CONTROL_SRWO_MASK) != 0U)
3097 {
3098 if (retry-- < 0)
3099 {
3100 return kStatus_Timeout;
3101 }
3102 SDK_DelayAtLeastUs(1, SDK_DEVICE_MAXIMUM_CPU_CLOCK_FREQUENCY);
3103 }
3104
3105 if (read != 0U)
3106 {
3107 *data = base->MTL_EST_GCL_DATA;
3108 }
3109
3110 if ((base->MTL_EST_GCL_CONTROL & ENET_QOS_MTL_EST_GCL_CONTROL_ERR0_MASK) != 0U)
3111 {
3112 return kStatus_ENET_QOS_Est_SwListWriteAbort;
3113 }
3114
3115 return kStatus_Success;
3116 }
3117
3118 static status_t ENET_QOS_EstProgramWord(ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr)
3119 {
3120 return ENET_QOS_EstReadWriteWord(base, addr, data, gcrr, 0, 0);
3121 }
3122
3123 static status_t ENET_QOS_EstReadWord(ENET_QOS_Type *base, uint32_t addr, uint32_t *data, uint8_t gcrr, uint8_t dbgm)
3124 {
3125 return ENET_QOS_EstReadWriteWord(base, addr, data, gcrr, 1, dbgm);
3126 }
3127
3128 /*!
3129 * @brief Program Gate Control List.
3130 *
3131 * This function is used to program the Enhanced Scheduled Transmission (IEEE 802.1Qbv) gate control list.
3132 *
3133 * @param base ENET peripheral base address.
3134 * @param gcl Pointer to the Gate Control List structure.
3135 * @param ptpClk_Hz Frequency of the PTP clock.
3136 */
3137 status_t ENET_QOS_EstProgramGcl(ENET_QOS_Type *base, enet_qos_est_gcl_t *gcl, uint32_t ptpClk_Hz)
3138 {
3139 assert(gcl != NULL);
3140 uint32_t i, control, data;
3141 enet_qos_est_gate_op_t *gateOp;
3142 status_t rc;
3143
3144 #define EST_MAX_INTERVAL ((1UL << ENET_QOS_EST_WID) - 1U)
3145 #define EST_MAX_GATE ((1UL << (32U - ENET_QOS_EST_WID)) - 1U)
3146
3147 if (!gcl->enable)
3148 {
3149 goto exit;
3150 }
3151
3152 /* Sanity checks */
3153 if (gcl->numEntries > ENET_QOS_EST_DEP)
3154 {
3155 return kStatus_ENET_QOS_Est_InvalidParameter;
3156 }
3157
3158 if (gcl->opList == NULL)
3159 {
3160 return kStatus_ENET_QOS_Est_InvalidParameter;
3161 }
3162
3163 gateOp = gcl->opList;
3164
3165 for (i = 0; i < gcl->numEntries; i++)
3166 {
3167 if (gateOp->interval > EST_MAX_INTERVAL)
3168 {
3169 return kStatus_ENET_QOS_Est_InvalidParameter;
3170 }
3171 if (gateOp->gate > EST_MAX_GATE)
3172 {
3173 return kStatus_ENET_QOS_Est_InvalidParameter;
3174 }
3175 gateOp++;
3176 }
3177
3178 /* Check if sw list is busy */
3179 if ((base->MTL_EST_CONTROL & ENET_QOS_MTL_EST_CONTROL_SSWL_MASK) != 0U)
3180 {
3181 return kStatus_ENET_QOS_Est_SwListBusy;
3182 }
3183
3184 gateOp = gcl->opList;
3185
3186 for (i = 0; i < gcl->numEntries; i++)
3187 {
3188 data = gateOp->interval | (gateOp->gate << ENET_QOS_EST_WID);
3189 rc = ENET_QOS_EstProgramWord(base, i, &data, 0);
3190 if (rc != kStatus_Success)
3191 {
3192 return rc;
3193 }
3194
3195 gateOp++;
3196 }
3197
3198 /* BTR High */
3199 data = (uint32_t)(gcl->baseTime >> 32U);
3200 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_btr_high, &data, 1U);
3201 if (rc != kStatus_Success)
3202 {
3203 return rc;
3204 }
3205
3206 /* BTR Low */
3207 data = (uint32_t)gcl->baseTime;
3208 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_btr_low, &data, 1);
3209 if (rc != kStatus_Success)
3210 {
3211 return rc;
3212 }
3213
3214 /* CTR High */
3215 data = (uint32_t)(gcl->cycleTime >> 32U);
3216 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ctr_high, &data, 1);
3217 if (rc != kStatus_Success)
3218 {
3219 return rc;
3220 }
3221
3222 /* CTR Low */
3223 data = (uint32_t)gcl->cycleTime;
3224 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ctr_low, &data, 1);
3225 if (rc != kStatus_Success)
3226 {
3227 return rc;
3228 }
3229
3230 /* TER */
3231 data = gcl->extTime;
3232 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_ter, &data, 1);
3233 if (rc != kStatus_Success)
3234 {
3235 return rc;
3236 }
3237
3238 /* LLR */
3239 data = gcl->numEntries;
3240 rc = ENET_QOS_EstProgramWord(base, (uint32_t)kENET_QOS_Ets_llr, &data, 1);
3241 if (rc != kStatus_Success)
3242 {
3243 return rc;
3244 }
3245
3246 exit:
3247 control = base->MTL_EST_CONTROL;
3248
3249 if (gcl->enable)
3250 {
3251 control &= ~ENET_QOS_MTL_EST_CONTROL_PTOV_MASK;
3252 control |= ENET_QOS_MTL_EST_CONTROL_SSWL_MASK | ENET_QOS_MTL_EST_CONTROL_EEST_MASK |
3253 ENET_QOS_MTL_EST_CONTROL_PTOV((1000000000U / ptpClk_Hz) * 6U);
3254 }
3255 else
3256 {
3257 control &= ~ENET_QOS_MTL_EST_CONTROL_EEST_MASK;
3258 }
3259
3260 base->MTL_EST_CONTROL = control;
3261
3262 return kStatus_Success;
3263 }
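/*
 * Illustrative sketch (not part of the driver): building a two-entry gate
 * control list and handing it to ENET_QOS_EstProgramGcl(). The 64-bit
 * baseTime/cycleTime layout (seconds in the upper word, nanoseconds in the
 * lower word, mirroring the BTR/CTR high/low split above) and the interval
 * units are assumptions to be checked against the reference manual.
 */
#if 0 /* Example only, not compiled with the driver. */
static status_t ENET_QOS_ExampleProgramGcl(ENET_QOS_Type *base, uint64_t baseTime, uint32_t ptpClk_Hz)
{
    /* Queue 0 open for the first half of the cycle, queue 1 for the second half. */
    static enet_qos_est_gate_op_t gateOps[2] = {
        {.gate = 0x1U, .interval = 500000U},
        {.gate = 0x2U, .interval = 500000U},
    };
    enet_qos_est_gcl_t gcl = {
        .enable     = true,
        .baseTime   = baseTime,
        .cycleTime  = 1000000U, /* Lower word only: 1 ms cycle. */
        .extTime    = 0U,
        .numEntries = 2U,
        .opList     = gateOps,
    };

    return ENET_QOS_EstProgramGcl(base, &gcl, ptpClk_Hz);
}
#endif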
3264
3265 /*!
3266 * @brief Read Gate Control List.
3267 *
3268 * This function is used to read the Enhanced Scheduled Transmission (IEEE 802.1Qbv) gate control list.
3269 *
3270 * @param base ENET peripheral base address.
3271 * @param gcl Pointer to the Gate Control List structure.
3272 * @param listLen Length of the opList array provided in the gcl structure.
3273 * @param hwList If true, read the hardware list; if false, read the software list.
3274 */
3275 status_t ENET_QOS_EstReadGcl(ENET_QOS_Type *base, enet_qos_est_gcl_t *gcl, uint32_t listLen, bool hwList)
3276 {
3277 assert(gcl != NULL);
3278 assert(gcl->opList != NULL);
3279 uint8_t dbgm = 0;
3280 uint32_t data, i;
3281 enet_qos_est_gate_op_t *gateOp;
3282 status_t rc;
3283
3284 if (hwList == true)
3285 {
3286 dbgm = 1;
3287 }
3288
3289 /* LLR */
3290 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_llr, &data, 1, dbgm);
3291 if (rc != kStatus_Success)
3292 {
3293 return rc;
3294 }
3295
3296 gcl->numEntries = data;
3297
3298 if (gcl->numEntries > listLen)
3299 {
3300 return kStatus_ENET_QOS_Est_InvalidParameter;
3301 }
3302
3303 /* BTR High */
3304 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_btr_high, &data, 1, dbgm);
3305 if (rc != kStatus_Success)
3306 {
3307 return rc;
3308 }
3309
3310 gcl->baseTime = (uint64_t)data << 32U;
3311
3312 /* BTR Low */
3313 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_btr_low, &data, 1, dbgm);
3314 if (rc != kStatus_Success)
3315 {
3316 return rc;
3317 }
3318
3319 gcl->baseTime |= data;
3320
3321 /* CTR High */
3322 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ctr_high, &data, 1, dbgm);
3323 if (rc != kStatus_Success)
3324 {
3325 return rc;
3326 }
3327
3328 gcl->cycleTime = (uint64_t)data << 32U;
3329
3330 /* CTR Low */
3331 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ctr_low, &data, 1, dbgm);
3332 if (rc != kStatus_Success)
3333 {
3334 return rc;
3335 }
3336
3337 gcl->cycleTime |= data;
3338
3339 /* TER */
3340 rc = ENET_QOS_EstReadWord(base, (uint32_t)kENET_QOS_Ets_ter, &data, 1, dbgm);
3341 if (rc != kStatus_Success)
3342 {
3343 return rc;
3344 }
3345
3346 gcl->extTime = data;
3347
3348 gateOp = gcl->opList;
3349
3350 for (i = 0; i < gcl->numEntries; i++)
3351 {
3352 rc = ENET_QOS_EstReadWord(base, i, &data, 0, dbgm);
3353 if (rc != kStatus_Success)
3354 {
3355 return rc;
3356 }
3357
3358 gateOp->interval = data & (EST_MAX_INTERVAL);
3359 gateOp->gate = data >> ENET_QOS_EST_WID;
3360 gateOp++;
3361 }
3362
3363 return kStatus_Success;
3364 }
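/*
 * Illustrative sketch (not part of the driver): reading back the list that the
 * hardware is currently executing (hwList = true). It assumes ENET_QOS_EST_DEP
 * is a compile-time constant, as its use as the list depth limit above suggests.
 */
#if 0 /* Example only, not compiled with the driver. */
static status_t ENET_QOS_ExampleDumpHwGcl(ENET_QOS_Type *base)
{
    enet_qos_est_gate_op_t gateOps[ENET_QOS_EST_DEP];
    enet_qos_est_gcl_t gcl = {.opList = gateOps};

    return ENET_QOS_EstReadGcl(base, &gcl, ENET_QOS_EST_DEP, true);
}
#endif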
3365
3366 /*!
3367 * brief Read flexible rx parser configuration at specified index.
3368 *
3369 * This function is used to read the flexible rx parser configuration at the specified index.
3370 *
3371 * param base ENET peripheral base address.
3372 * param rxpConfig The rx parser configuration pointer.
3373 * param entryIndex The rx parser entry index to read, starting from 0.
3374 * retval kStatus_Success Read rx parser configuration success.
3375 * retval kStatus_ENET_QOS_Timeout Poll status flag timeout.
3376 */
3377 status_t ENET_QOS_ReadRxParser(ENET_QOS_Type *base, enet_qos_rxp_config_t *rxpConfig, uint16_t entryIndex)
3378 {
3379 assert(rxpConfig != NULL);
3380 assert(entryIndex < ENET_QOS_RXP_ENTRY_COUNT);
3381
3382 uint32_t *dataPtr;
3383 uint8_t entrySize = sizeof(enet_qos_rxp_config_t) / sizeof(uint32_t);
3384 uint32_t value = 0U;
3385 status_t result = kStatus_Success;
3386
3387 /* Wait hardware not busy */
3388 result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS),
3389 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3390 if (kStatus_Success != result)
3391 {
3392 return result;
3393 }
3394
3395 for (uint8_t i = 0; i < entrySize; i++)
3396 {
3397 /* Read address. */
3398 value = ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_ADDR((uint32_t)entrySize * entryIndex + i);
3399
3400 /* Issue read command. */
3401 value &= ~ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_WRRDN_MASK;
3402 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3403
3404 /* Start Read */
3405 value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK;
3406 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3407
3408 /* Wait hardware not busy */
3409 result = ENET_QOS_PollStatusFlag(&base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS,
3410 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3411 if (kStatus_Success != result)
3412 {
3413 return result;
3414 }
3415
3416 dataPtr = (uint32_t *)(void *)&rxpConfig[entryIndex];
3417 dataPtr = &dataPtr[i];
3418 /* Read data */
3419 *dataPtr = base->MTL_RXP_INDIRECT_ACC_DATA;
3420 }
3421
3422 return result;
3423 }
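/*
 * Illustrative sketch (not part of the driver): the read result is stored at
 * rxpConfig[entryIndex], so the buffer passed in must be at least
 * (entryIndex + 1) entries long, for example the same table that was written
 * with ENET_QOS_ConfigureRxParser().
 */
#if 0 /* Example only, not compiled with the driver. */
static status_t ENET_QOS_ExampleReadRxpEntry(ENET_QOS_Type *base, enet_qos_rxp_config_t *table, uint16_t index)
{
    return ENET_QOS_ReadRxParser(base, table, index);
}
#endif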
3424
3425 /*!
3426 * brief Configure flexible rx parser.
3427 *
3428 * This function is used to configure the flexible rx parser table.
3429 *
3430 * param base ENET peripheral base address.
3431 * param rxpConfig The rx parser configuration pointer.
3432 * param entryCount The rx parser entry count.
3433 * retval kStatus_Success Configure rx parser success.
3434 * retval kStatus_ENET_QOS_Timeout Poll status flag timeout.
3435 */
3436 status_t ENET_QOS_ConfigureRxParser(ENET_QOS_Type *base, enet_qos_rxp_config_t *rxpConfig, uint16_t entryCount)
3437 {
3438 assert(rxpConfig != NULL);
3439 assert(entryCount <= ENET_QOS_RXP_ENTRY_COUNT);
3440
3441 uint32_t *dataPtr;
3442 uint32_t entrySize = sizeof(enet_qos_rxp_config_t) / sizeof(uint32_t);
3443 uint32_t value = 0U;
3444 status_t result = kStatus_Success;
3445 bool enableRx = false;
3446
3447 /* Disable the MAC rx. */
3448 if (0U != (base->MAC_CONFIGURATION & ENET_QOS_MAC_CONFIGURATION_RE_MASK))
3449 {
3450 base->MAC_CONFIGURATION &= ~ENET_QOS_MAC_CONFIGURATION_RE_MASK;
3451 enableRx = true;
3452 }
3453
3454 /* Disable frame parser. */
3455 result = ENET_QOS_EnableRxParser(base, false);
3456
3457 if (kStatus_Success != result)
3458 {
3459 return result;
3460 }
3461
3462 for (uint8_t count = 0; count < entryCount; count++)
3463 {
3464 for (uint8_t i = 0; i < entrySize; i++)
3465 {
3466 /* Wait hardware not busy */
3467 result = ENET_QOS_PollStatusFlag(&base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS,
3468 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3469 if (kStatus_Success != result)
3470 {
3471 return result;
3472 }
3473
3474 dataPtr = (uint32_t *)(void *)&rxpConfig[count];
3475 dataPtr = &dataPtr[i];
3476
3477 /* Write data before issue write command */
3478 base->MTL_RXP_INDIRECT_ACC_DATA = *dataPtr;
3479
3480 /* Write address and issue write command */
3481 value = ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_ADDR(entrySize * count + i);
3482 // base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3483
3484 value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_WRRDN_MASK;
3485 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3486
3487 /* Start write */
3488 value |= ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK;
3489 base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS = value;
3490 }
3491 }
3492
3493 /* Wait hardware not busy */
3494 result = ENET_QOS_PollStatusFlag(&(base->MTL_RXP_INDIRECT_ACC_CONTROL_STATUS),
3495 ENET_QOS_MTL_RXP_INDIRECT_ACC_CONTROL_STATUS_STARTBUSY_MASK, 0U);
3496 if (kStatus_Success != result)
3497 {
3498 return result;
3499 }
3500
3501 /* Program NVE and NPE. */
3502 value = base->MTL_RXP_CONTROL_STATUS;
3503 value &= ~(ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE_MASK | ENET_QOS_MTL_RXP_CONTROL_STATUS_NPE_MASK);
3504
3505 value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NPE((uint32_t)entryCount - 1U);
3506 if (entryCount < 3U)
3507 {
3508 value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE(2U);
3509 }
3510 else
3511 {
3512 value |= ENET_QOS_MTL_RXP_CONTROL_STATUS_NVE((uint32_t)entryCount - 1U);
3513 }
3514
3515 base->MTL_RXP_CONTROL_STATUS = value;
3516
3517 /* Enable frame parser. */
3518 result = ENET_QOS_EnableRxParser(base, true);
3519
3520 /* Enable Receive */
3521 if (enableRx)
3522 {
3523 base->MAC_CONFIGURATION |= ENET_QOS_MAC_CONFIGURATION_RE_MASK;
3524 }
3525
3526 return result;
3527 }
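/*
 * Illustrative sketch (not part of the driver): loading a caller-built parser
 * table. ENET_QOS_ConfigureRxParser() itself disables the MAC receiver and the
 * parser while the table is written and re-enables them afterwards, so the
 * call is best placed where a short receive gap is acceptable. The table
 * contents are left to the caller because the enet_qos_rxp_config_t field
 * layout is not shown in this file.
 */
#if 0 /* Example only, not compiled with the driver. */
static status_t ENET_QOS_ExampleLoadRxpTable(ENET_QOS_Type *base,
                                             enet_qos_rxp_config_t *table,
                                             uint16_t tableEntries)
{
    /* tableEntries must not exceed ENET_QOS_RXP_ENTRY_COUNT. */
    return ENET_QOS_ConfigureRxParser(base, table, tableEntries);
}
#endif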
3528
3529 /*!
3530 * brief Gets statistical data in transfer.
3531 *
3532 * param base ENET_QOS peripheral base address.
3533 * param statistics The statistics structure pointer.
3534 */
3535 void ENET_QOS_GetStatistics(ENET_QOS_Type *base, enet_qos_transfer_stats_t *statistics)
3536 {
3537 /* Rx statistics */
3538 statistics->statsRxFrameCount = base->MAC_RX_PACKETS_COUNT_GOOD_BAD;
3539 statistics->statsRxCrcErr = base->MAC_RX_CRC_ERROR_PACKETS;
3540 statistics->statsRxAlignErr = base->MAC_RX_ALIGNMENT_ERROR_PACKETS;
3541 statistics->statsRxLengthErr = base->MAC_RX_LENGTH_ERROR_PACKETS;
3542 statistics->statsRxFifoOverflowErr = base->MAC_RX_FIFO_OVERFLOW_PACKETS;
3543
3544 /* Tx statistics */
3545 statistics->statsTxFrameCount = base->MAC_TX_PACKET_COUNT_GOOD_BAD;
3546 statistics->statsTxFifoUnderRunErr = base->MAC_TX_UNDERFLOW_ERROR_PACKETS;
3547 }
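/*
 * Illustrative sketch (not part of the driver): periodically sampling the MAC
 * packet counters. PRINTF is assumed to come from fsl_debug_console.h, and
 * whether the hardware counters clear on read depends on the MMC configuration.
 */
#if 0 /* Example only, not compiled with the driver. */
#include "fsl_debug_console.h"

static void ENET_QOS_ExamplePrintStats(ENET_QOS_Type *base)
{
    enet_qos_transfer_stats_t stats;

    ENET_QOS_GetStatistics(base, &stats);

    PRINTF("rx frames: %u, rx CRC errors: %u, tx frames: %u\r\n",
           (unsigned int)stats.statsRxFrameCount,
           (unsigned int)stats.statsRxCrcErr,
           (unsigned int)stats.statsTxFrameCount);
}
#endif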
3548
3549 /*!
3550 * brief The ENET IRQ handler.
3551 *
3552 * param base ENET peripheral base address.
3553 * param handle The ENET handler pointer.
3554 */
3555 void ENET_QOS_CommonIRQHandler(ENET_QOS_Type *base, enet_qos_handle_t *handle)
3556 {
3557 /* Check for the interrupt source type. */
3558 /* DMA CHANNEL 0. */
3559 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC0IS_MASK) != 0U)
3560 {
3561 uint32_t flag = base->DMA_CH[0].DMA_CHX_STAT;
3562 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3563 {
3564 base->DMA_CH[0].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3565 if (handle->callback != NULL)
3566 {
3567 handle->callback(base, handle, kENET_QOS_RxIntEvent, 0, handle->userData);
3568 }
3569 }
3570 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3571 {
3572 base->DMA_CH[0].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3573 ENET_QOS_ReclaimTxDescriptor(base, handle, 0);
3574 }
3575 }
3576
3577 /* DMA CHANNEL 1. */
3578 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC1IS_MASK) != 0U)
3579 {
3580 uint32_t flag = base->DMA_CH[1].DMA_CHX_STAT;
3581 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3582 {
3583 base->DMA_CH[1].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3584 if (handle->callback != NULL)
3585 {
3586 handle->callback(base, handle, kENET_QOS_RxIntEvent, 1, handle->userData);
3587 }
3588 }
3589 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3590 {
3591 base->DMA_CH[1].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3592 ENET_QOS_ReclaimTxDescriptor(base, handle, 1);
3593 }
3594 }
3595
3596 /* DMA CHANNEL 2. */
3597 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC2IS_MASK) != 0U)
3598 {
3599 uint32_t flag = base->DMA_CH[2].DMA_CHX_STAT;
3600 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3601 {
3602 base->DMA_CH[2].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3603 if (handle->callback != NULL)
3604 {
3605 handle->callback(base, handle, kENET_QOS_RxIntEvent, 2, handle->userData);
3606 }
3607 }
3608 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3609 {
3610 base->DMA_CH[2].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3611 ENET_QOS_ReclaimTxDescriptor(base, handle, 2);
3612 }
3613 }
3614
3615 /* DMA CHANNEL 3. */
3616 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_DC3IS_MASK) != 0U)
3617 {
3618 uint32_t flag = base->DMA_CH[3].DMA_CHX_STAT;
3619 if ((flag & ENET_QOS_DMA_CHX_STAT_RI_MASK) != 0U)
3620 {
3621 base->DMA_CH[3].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_RI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3622 if (handle->callback != NULL)
3623 {
3624 handle->callback(base, handle, kENET_QOS_RxIntEvent, 3, handle->userData);
3625 }
3626 }
3627 if ((flag & ENET_QOS_DMA_CHX_STAT_TI_MASK) != 0U)
3628 {
3629 base->DMA_CH[3].DMA_CHX_STAT = ENET_QOS_DMA_CHX_STAT_TI_MASK | ENET_QOS_DMA_CHX_STAT_NIS_MASK;
3630 ENET_QOS_ReclaimTxDescriptor(base, handle, 3);
3631 }
3632 }
3633
3634 /* MAC TIMESTAMP. */
3635 if ((base->DMA_INTERRUPT_STATUS & ENET_QOS_DMA_INTERRUPT_STATUS_MACIS_MASK) != 0U)
3636 {
3637 if ((base->MAC_INTERRUPT_STATUS & ENET_QOS_MAC_INTERRUPT_STATUS_TSIS_MASK) != 0U)
3638 {
3639 if (handle->callback != NULL)
3640 {
3641 handle->callback(base, handle, kENET_QOS_TimeStampIntEvent, 0, handle->userData);
3642 }
3643 }
3644 }
3645 SDK_ISR_EXIT_BARRIER;
3646 }
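/*
 * Illustrative sketch (not part of the driver): an application callback shaped
 * after the way the handler above invokes it (base, handle, event, channel,
 * userData). The exact callback typedef and parameter types should be taken
 * from fsl_enet_qos.h; the signature below is an assumption.
 */
#if 0 /* Example only, not compiled with the driver. */
static void APP_EnetQosCallback(ENET_QOS_Type *base,
                                enet_qos_handle_t *handle,
                                enet_qos_event_t event,
                                uint8_t channel,
                                void *userData)
{
    if (event == kENET_QOS_RxIntEvent)
    {
        /* A frame arrived on the given DMA channel; signal the receive task. */
    }
    else if (event == kENET_QOS_TimeStampIntEvent)
    {
        /* A PTP timestamp event occurred; handle it as the application requires. */
    }
}
#endif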
3647
3648 #if defined(ENET_QOS)
3649 void ENET_QOS_DriverIRQHandler(void);
3650 void ENET_QOS_DriverIRQHandler(void)
3651 {
3652 s_enetqosIsr(ENET_QOS, s_ENETHandle[0]);
3653 }
3654 #endif
3655
3656 #if defined(CONNECTIVITY__ENET_QOS)
3657 void CONNECTIVITY_EQOS_INT_DriverIRQHandler(void);
3658 void CONNECTIVITY_EQOS_INT_DriverIRQHandler(void)
3659 {
3660 s_enetqosIsr(CONNECTIVITY__ENET_QOS, s_ENETHandle[0]);
3661 }
3662 #endif
3663