/*
 * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "hal/emac_hal.h"
#include "hal/emac_ll.h"
#include "hal/gpio_ll.h"

#define ETH_CRC_LENGTH (4)

#ifndef NDEBUG
#define EMAC_HAL_BUF_MAGIC_ID 0x1E1C8416
#endif // NDEBUG

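/* Metadata stored at the start of a buffer returned by emac_hal_alloc_recv_buf();
   emac_hal_receive_frame() reads it back when it is called with EMAC_HAL_BUF_SIZE_AUTO. */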
typedef struct {
#ifndef NDEBUG
    uint32_t magic_id;
#endif // NDEBUG
    uint32_t copy_len;
} __attribute__((packed)) emac_hal_auto_buf_info_t;

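/* Flush the transmit FIFO and poll until the hardware clears the flush bit;
   returns ESP_ERR_TIMEOUT if the bit is still set after the bounded poll loop. */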
static esp_err_t emac_hal_flush_trans_fifo(emac_hal_context_t *hal)
{
    emac_ll_flush_trans_fifo_enable(hal->dma_regs, true);
    /* no other writes to the Operation Mode register until the flush tx fifo bit is cleared */
    for (uint32_t i = 0; i < 1000; i++) {
        if (emac_ll_get_flush_trans_fifo(hal->dma_regs) == 0) {
            return ESP_OK;
        }
    }
    return ESP_ERR_TIMEOUT;
}

void emac_hal_iomux_init_mii(void)
{
    /* TX_CLK to GPIO0 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO0_U, FUNC_GPIO0_EMAC_TX_CLK);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[0]);
    /* TX_EN to GPIO21 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO21_U, FUNC_GPIO21_EMAC_TX_EN);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[21]);
    /* TXD0 to GPIO19 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO19_U, FUNC_GPIO19_EMAC_TXD0);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[19]);
    /* TXD1 to GPIO22 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO22_U, FUNC_GPIO22_EMAC_TXD1);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[22]);
    /* TXD2 to MTMS */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_MTMS_U, FUNC_MTMS_EMAC_TXD2);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[14]);
    /* TXD3 to MTDI */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_MTDI_U, FUNC_MTDI_EMAC_TXD3);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[12]);

    /* RX_CLK to GPIO5 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO5_U, FUNC_GPIO5_EMAC_RX_CLK);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[5]);
    /* RX_DV to GPIO27 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO27_U, FUNC_GPIO27_EMAC_RX_DV);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[27]);
    /* RXD0 to GPIO25 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO25_U, FUNC_GPIO25_EMAC_RXD0);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[25]);
    /* RXD1 to GPIO26 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO26_U, FUNC_GPIO26_EMAC_RXD1);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[26]);
    /* RXD2 to U0TXD */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_U0TXD_U, FUNC_U0TXD_EMAC_RXD2);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[1]);
    /* RXD3 to MTDO */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_MTDO_U, FUNC_MTDO_EMAC_RXD3);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[15]);
}

void emac_hal_iomux_rmii_clk_input(void)
{
    /* REF_CLK(RMII mode) to GPIO0 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO0_U, FUNC_GPIO0_EMAC_TX_CLK);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[0]);
}

void emac_hal_iomux_rmii_clk_output(int num)
{
    switch (num) {
    case 0:
        /* APLL clock output to GPIO0 (must be configured to 50MHz!) */
        gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO0_U, FUNC_GPIO0_CLK_OUT1);
        PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[0]);
        break;
    case 16:
        /* RMII CLK (50MHz) output to GPIO16 */
        gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO16_U, FUNC_GPIO16_EMAC_CLK_OUT);
        PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[16]);
        break;
    case 17:
        /* RMII CLK (50MHz) output to GPIO17 */
        gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO17_U, FUNC_GPIO17_EMAC_CLK_OUT_180);
        PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[17]);
        break;
    default:
        break;
    }
}

void emac_hal_iomux_init_rmii(void)
{
    /* TX_EN to GPIO21 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO21_U, FUNC_GPIO21_EMAC_TX_EN);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[21]);
    /* TXD0 to GPIO19 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO19_U, FUNC_GPIO19_EMAC_TXD0);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[19]);
    /* TXD1 to GPIO22 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO22_U, FUNC_GPIO22_EMAC_TXD1);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[22]);

    /* CRS_DV to GPIO27 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO27_U, FUNC_GPIO27_EMAC_RX_DV);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[27]);
    /* RXD0 to GPIO25 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO25_U, FUNC_GPIO25_EMAC_RXD0);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[25]);
    /* RXD1 to GPIO26 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO26_U, FUNC_GPIO26_EMAC_RXD1);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[26]);
}

void emac_hal_iomux_init_tx_er(void)
{
    /* TX_ER to GPIO4 */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_GPIO4_U, FUNC_GPIO4_EMAC_TX_ER);
    PIN_INPUT_DISABLE(GPIO_PIN_MUX_REG[4]);
}

void emac_hal_iomux_init_rx_er(void)
{
    /* RX_ER to MTCK */
    gpio_ll_iomux_func_sel(PERIPHS_IO_MUX_MTCK_U, FUNC_MTCK_EMAC_RX_ER);
    PIN_INPUT_ENABLE(GPIO_PIN_MUX_REG[13]);
}

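/* Bind the HAL context to the EMAC MAC/DMA/EXT register blocks and to the caller-provided
   descriptor memory and DMA buffer arrays; no hardware is accessed here. */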
void emac_hal_init(emac_hal_context_t *hal, void *descriptors,
                   uint8_t **rx_buf, uint8_t **tx_buf)
{
    hal->dma_regs = &EMAC_DMA;
    hal->mac_regs = &EMAC_MAC;
    hal->ext_regs = &EMAC_EXT;
    hal->descriptors = descriptors;
    hal->rx_buf = rx_buf;
    hal->tx_buf = tx_buf;
}

void emac_hal_set_csr_clock_range(emac_hal_context_t *hal, int freq)
{
    /* Tell the MAC the CSR (system) clock frequency in Hz; it determines the MDC frequency range (1 MHz ~ 2.5 MHz) */
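    /* e.g., assuming an 80 MHz CSR clock, the divide-by-42 setting below yields an MDC of roughly 1.9 MHz */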
    if (freq >= 20000000 && freq < 35000000) {
        emac_ll_set_csr_clock_division(hal->mac_regs, 2); // CSR clock/16
    } else if (freq >= 35000000 && freq < 60000000) {
        emac_ll_set_csr_clock_division(hal->mac_regs, 3); // CSR clock/26
    } else if (freq >= 60000000 && freq < 100000000) {
        emac_ll_set_csr_clock_division(hal->mac_regs, 0); // CSR clock/42
    } else if (freq >= 100000000 && freq < 150000000) {
        emac_ll_set_csr_clock_division(hal->mac_regs, 1); // CSR clock/62
    } else if (freq >= 150000000 && freq < 250000000) {
        emac_ll_set_csr_clock_division(hal->mac_regs, 4); // CSR clock/102
    } else {
        emac_ll_set_csr_clock_division(hal->mac_regs, 5); // CSR clock/124
    }
}

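/* Lay out the shared descriptor memory as CONFIG_ETH_DMA_RX_BUFFER_NUM Rx descriptors followed by
   CONFIG_ETH_DMA_TX_BUFFER_NUM Tx descriptors, link each chain into a ring via Buffer2NextDescAddr
   (second-address-chained mode), and program the DMA descriptor base address registers. */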
void emac_hal_reset_desc_chain(emac_hal_context_t *hal)
{
    /* reset DMA descriptors */
    hal->rx_desc = (eth_dma_rx_descriptor_t *)(hal->descriptors);
    hal->tx_desc = (eth_dma_tx_descriptor_t *)((uint32_t)hal->descriptors +
                   (sizeof(eth_dma_rx_descriptor_t) * CONFIG_ETH_DMA_RX_BUFFER_NUM));
    /* init rx chain */
    for (int i = 0; i < CONFIG_ETH_DMA_RX_BUFFER_NUM; i++) {
        /* Set Own bit of the Rx descriptor Status: DMA */
        hal->rx_desc[i].RDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
        /* Set Buffer1 size and Second Address Chained bit */
        hal->rx_desc[i].RDES1.SecondAddressChained = 1;
        hal->rx_desc[i].RDES1.ReceiveBuffer1Size = CONFIG_ETH_DMA_BUFFER_SIZE;
        /* Enable Ethernet DMA Rx Descriptor interrupt */
        hal->rx_desc[i].RDES1.DisableInterruptOnComplete = 0;
        /* point to the buffer */
        hal->rx_desc[i].Buffer1Addr = (uint32_t)(hal->rx_buf[i]);
        /* point to next descriptor */
        hal->rx_desc[i].Buffer2NextDescAddr = (uint32_t)(hal->rx_desc + i + 1);
    }
    /* For the last descriptor, set the next descriptor address back to the first descriptor base address */
    hal->rx_desc[CONFIG_ETH_DMA_RX_BUFFER_NUM - 1].Buffer2NextDescAddr = (uint32_t)(hal->rx_desc);

    /* init tx chain */
    for (int i = 0; i < CONFIG_ETH_DMA_TX_BUFFER_NUM; i++) {
        /* Set Own bit of the Tx descriptor Status: CPU */
        hal->tx_desc[i].TDES0.Own = EMAC_LL_DMADESC_OWNER_CPU;
        hal->tx_desc[i].TDES0.SecondAddressChained = 1;
        hal->tx_desc[i].TDES1.TransmitBuffer1Size = CONFIG_ETH_DMA_BUFFER_SIZE;
        /* Enable Ethernet DMA Tx Descriptor interrupt */
        hal->tx_desc[i].TDES0.InterruptOnComplete = 1;
        /* Enable Transmit Timestamp */
        hal->tx_desc[i].TDES0.TransmitTimestampEnable = 1;
        /* point to the buffer */
        hal->tx_desc[i].Buffer1Addr = (uint32_t)(hal->tx_buf[i]);
        /* point to next descriptor */
        hal->tx_desc[i].Buffer2NextDescAddr = (uint32_t)(hal->tx_desc + i + 1);
    }
    /* For the last descriptor, set the next descriptor address back to the first descriptor base address */
    hal->tx_desc[CONFIG_ETH_DMA_TX_BUFFER_NUM - 1].Buffer2NextDescAddr = (uint32_t)(hal->tx_desc);

    /* set base address of the first descriptor */
    emac_ll_set_rx_desc_addr(hal->dma_regs, (uint32_t)hal->rx_desc);
    emac_ll_set_tx_desc_addr(hal->dma_regs, (uint32_t)hal->tx_desc);
}

void emac_hal_init_mac_default(emac_hal_context_t *hal)
{
    /* MACCR Configuration */
    /* Enable the watchdog on the receiver: frames longer than 2048 bytes are not allowed */
    emac_ll_watchdog_enable(hal->mac_regs, true);
    /* Enable the jabber timer on the transmitter: frames longer than 2048 bytes are not allowed */
    emac_ll_jabber_enable(hal->mac_regs, true);
    /* minimum IFG between frames during transmission is 96 bit times */
    emac_ll_set_inter_frame_gap(hal->mac_regs, EMAC_LL_INTERFRAME_GAP_96BIT);
    /* Enable Carrier Sense During Transmission */
    emac_ll_carrier_sense_enable(hal->mac_regs, true);
    /* Select port speed (10/100 Mbps): default to 100M here; it will be updated later by auto-negotiation */
    emac_ll_set_port_speed(hal->mac_regs, ETH_SPEED_100M);
    /* Allow the reception of frames when the TX_EN signal is asserted in Half-Duplex mode */
    emac_ll_recv_own_enable(hal->mac_regs, true);
    /* Disable internal loopback mode */
    emac_ll_loopback_enable(hal->mac_regs, false);
    /* Select duplex mode: default to full duplex here; it will be updated later by auto-negotiation */
    emac_ll_set_duplex(hal->mac_regs, ETH_DUPLEX_FULL);
    /* Select the checksum mode for received frame payload's TCP/UDP/ICMP headers */
    emac_ll_checksum_offload_mode(hal->mac_regs, ETH_CHECKSUM_HW);
    /* Enable MAC retry transmission when a collision occurs in half duplex mode */
    emac_ll_retry_enable(hal->mac_regs, true);
    /* MAC passes all incoming frames to host, without modifying them */
    emac_ll_auto_pad_crc_strip_enable(hal->mac_regs, false);
    /* Set the back-off limit time before retrying a transmission after a collision */
    emac_ll_set_back_off_limit(hal->mac_regs, EMAC_LL_BACKOFF_LIMIT_10);
    /* Disable deferral check: the MAC defers until the CRS signal goes inactive */
    emac_ll_deferral_check_enable(hal->mac_regs, false);
    /* Set preamble length to 7 bytes */
    emac_ll_set_preamble_length(hal->mac_regs, EMAC_LL_PREAMBLE_LENGTH_7);

    /* MACFFR Configuration */
    /* Receiver module passes only those frames to the Application that pass the SA or DA address filter */
    emac_ll_receive_all_enable(hal->mac_regs, false);
    /* Disable source address filter */
    emac_ll_set_src_addr_filter(hal->mac_regs, EMAC_LL_SOURCE_ADDR_FILTER_DISABLE);
    emac_ll_sa_inverse_filter_enable(hal->mac_regs, false);
    /* MAC blocks all control frames */
    emac_ll_set_pass_ctrl_frame_mode(hal->mac_regs, EMAC_LL_CONTROL_FRAME_BLOCKALL);
    /* AFM module passes all received broadcast frames and multicast frames */
    emac_ll_broadcast_frame_enable(hal->mac_regs, true);
    emac_ll_pass_all_multicast_enable(hal->mac_regs, true);
    /* Address Check block operates in normal filtering mode for the DA address */
    emac_ll_da_inverse_filter_enable(hal->mac_regs, false);
    /* Disable Promiscuous Mode */
    emac_ll_promiscuous_mode_enable(hal->mac_regs, false);
}

void emac_hal_enable_flow_ctrl(emac_hal_context_t *hal, bool enable)
{
    /* MACFCR Configuration */
    if (enable) {
        /* Pause time */
        emac_ll_set_pause_time(hal->mac_regs, EMAC_LL_PAUSE_TIME);
        /* Enable generation of Zero-Quanta Pause Control frames */
        emac_ll_zero_quanta_pause_enable(hal->mac_regs, true);
        /* Threshold of the PAUSE timer at which a PAUSE frame is automatically retransmitted */
        emac_ll_set_pause_low_threshold(hal->mac_regs, EMAC_LL_PAUSE_LOW_THRESHOLD_MINUS_28);
        /* Don't allow the MAC to detect Pause frames addressed to the MAC address0 unicast address or the unique multicast address */
        emac_ll_unicast_pause_frame_detect_enable(hal->mac_regs, false);
        /* Enable the MAC to decode received Pause frames and disable its transmitter for the specified time */
        emac_ll_receive_flow_ctrl_enable(hal->mac_regs, true);
        /* Enable the MAC to transmit Pause frames in full duplex mode, or back-pressure operation in half duplex mode */
        emac_ll_transmit_flow_ctrl_enable(hal->mac_regs, true);
    } else {
        emac_ll_clear(hal->mac_regs);
    }
}

void emac_hal_init_dma_default(emac_hal_context_t *hal, emac_hal_dma_config_t *hal_config)
{
    /* DMAOMR Configuration */
    /* Enable Dropping of TCP/IP Checksum Error Frames */
    emac_ll_drop_tcp_err_frame_enable(hal->dma_regs, true);
    /* Enable Receive Store Forward */
    emac_ll_recv_store_forward_enable(hal->dma_regs, true);
    /* Enable Flushing of Received Frames because of the unavailability of receive descriptors or buffers */
    emac_ll_flush_recv_frame_enable(hal->dma_regs, true);
    /* Disable Transmit Store Forward */
    emac_ll_trans_store_forward_enable(hal->dma_regs, false);
    /* Flush Transmit FIFO */
    emac_hal_flush_trans_fifo(hal);
    /* Transmit Threshold Control */
    emac_ll_set_transmit_threshold(hal->dma_regs, EMAC_LL_TRANSMIT_THRESHOLD_CONTROL_64);
    /* Disable Forward Error Frame */
    emac_ll_forward_err_frame_enable(hal->dma_regs, false);
    /* Disable forward undersized good frame */
    emac_ll_forward_undersized_good_frame_enable(hal->dma_regs, false);
    /* Receive Threshold Control */
    emac_ll_set_recv_threshold(hal->dma_regs, EMAC_LL_RECEIVE_THRESHOLD_CONTROL_64);
    /* Allow the DMA to process a second frame of Transmit data even before obtaining the status for the first frame */
    emac_ll_opt_second_frame_enable(hal->dma_regs, true);

    /* DMABMR Configuration */
    /* Enable Mixed Burst */
    emac_ll_mixed_burst_enable(hal->dma_regs, true);
    /* Enable Address-Aligned Beats */
    emac_ll_addr_align_enable(hal->dma_regs, true);
    /* Don't use Separate PBL */
    emac_ll_use_separate_pbl_enable(hal->dma_regs, false);
    /* Set Rx/Tx DMA Burst Length */
    emac_ll_set_prog_burst_len(hal->dma_regs, hal_config->dma_burst_len);
    /* Enable Enhanced Descriptors, 8 words (32 bytes) */
    emac_ll_enhance_desc_enable(hal->dma_regs, true);
    /* Specifies the number of words to skip between two unchained descriptors (Ring mode) */
    emac_ll_set_desc_skip_len(hal->dma_regs, 0);
    /* DMA Arbitration Scheme */
    emac_ll_fixed_arbitration_enable(hal->dma_regs, false);
    /* Set priority ratio in the weighted round-robin arbitration between Rx DMA and Tx DMA */
    emac_ll_set_priority_ratio(hal->dma_regs, EMAC_LL_DMA_ARBITRATION_ROUNDROBIN_RXTX_1_1);
}

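/* Program the MII Address register for a PHY register access and set the busy bit; the caller is
   expected to poll the busy flag for completion (and, for a write access, to have loaded the MII
   data register beforehand). */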
void emac_hal_set_phy_cmd(emac_hal_context_t *hal, uint32_t phy_addr, uint32_t phy_reg, bool write)
{
    /* Write the PHY device address into the MII Address register */
    emac_ll_set_phy_addr(hal->mac_regs, phy_addr);
    /* Set the PHY register address */
    emac_ll_set_phy_reg(hal->mac_regs, phy_reg);
    /* Select read or write access */
    emac_ll_write_enable(hal->mac_regs, write);
    /* Set MII busy bit */
    emac_ll_set_busy(hal->mac_regs, true);
}

void emac_hal_set_address(emac_hal_context_t *hal, uint8_t *mac_addr)
{
    /* Make sure the MAC address is a unicast address (multicast bit cleared) */
    if (!(mac_addr[0] & 0x01)) {
        emac_ll_set_addr(hal->mac_regs, mac_addr);
    }
}

void emac_hal_start(emac_hal_context_t *hal)
{
    /* Enable Ethernet MAC and DMA Interrupt */
    emac_ll_enable_corresponding_intr(hal->dma_regs, EMAC_LL_CONFIG_ENABLE_INTR_MASK);
    /* Clear all pending interrupts */
    emac_ll_clear_all_pending_intr(hal->dma_regs);

    /* Enable transmit state machine of the MAC for transmission on the MII */
    emac_ll_transmit_enable(hal->mac_regs, true);
    /* Start DMA transmission */
    /* Note: the EMAC databook states that the DMA could be started before enabling
       the MAC transmitter. However, it turned out that such ordering may cause the
       MAC transmitter to hang */
    emac_ll_start_stop_dma_transmit(hal->dma_regs, true);

    /* Start DMA reception */
    emac_ll_start_stop_dma_receive(hal->dma_regs, true);
    /* Enable receive state machine of the MAC for reception from the MII */
    emac_ll_receive_enable(hal->mac_regs, true);
}

esp_err_t emac_hal_stop(emac_hal_context_t *hal)
{
    /* Stop DMA transmission */
    emac_ll_start_stop_dma_transmit(hal->dma_regs, false);

    if (emac_ll_transmit_frame_ctrl_status(hal->mac_regs) != 0x0) {
        /* Previous transmit in progress */
        return ESP_ERR_INVALID_STATE;
    }

    /* Disable receive state machine of the MAC for reception from the MII */
    emac_ll_receive_enable(hal->mac_regs, false);
    /* Disable transmit state machine of the MAC for transmission on the MII */
    emac_ll_transmit_enable(hal->mac_regs, false);

    if (emac_ll_receive_read_ctrl_state(hal->mac_regs) != 0x0) {
        /* Previous receive copy in progress */
        return ESP_ERR_INVALID_STATE;
    }

    /* Stop DMA reception */
    emac_ll_start_stop_dma_receive(hal->dma_regs, false);

    /* Flush Transmit FIFO */
    emac_hal_flush_trans_fifo(hal);

    /* Disable Ethernet MAC and DMA Interrupt */
    emac_ll_disable_all_intr(hal->dma_regs);

    return ESP_OK;
}

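/* Copy one contiguous frame into the Tx descriptor ring, split into CONFIG_ETH_DMA_BUFFER_SIZE
   chunks; returns the number of bytes queued, or 0 if the frame needs more descriptors than are
   available or a required descriptor is still owned by the DMA. */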
uint32_t emac_hal_transmit_frame(emac_hal_context_t *hal, uint8_t *buf, uint32_t length)
{
    /* Get the number of Tx buffers to use for the frame */
    uint32_t bufcount = 0;
    uint32_t lastlen = length;
    uint32_t sentout = 0;
    while (lastlen > CONFIG_ETH_DMA_BUFFER_SIZE) {
        lastlen -= CONFIG_ETH_DMA_BUFFER_SIZE;
        bufcount++;
    }
    if (lastlen) {
        bufcount++;
    }
    if (bufcount > CONFIG_ETH_DMA_TX_BUFFER_NUM) {
        goto err;
    }

    eth_dma_tx_descriptor_t *desc_iter = hal->tx_desc;
    /* A frame may be transmitted across multiple descriptors */
    for (size_t i = 0; i < bufcount; i++) {
        /* Check if the descriptor is owned by the Ethernet DMA (when 1) or CPU (when 0) */
        if (desc_iter->TDES0.Own != EMAC_LL_DMADESC_OWNER_CPU) {
            goto err;
        }
        /* Clear FIRST and LAST segment bits */
        desc_iter->TDES0.FirstSegment = 0;
        desc_iter->TDES0.LastSegment = 0;
        desc_iter->TDES0.InterruptOnComplete = 0;
        if (i == 0) {
            /* Set the first segment bit */
            desc_iter->TDES0.FirstSegment = 1;
        }
        if (i == (bufcount - 1)) {
            /* Set the last segment bit */
            desc_iter->TDES0.LastSegment = 1;
            /* Enable transmit interrupt */
            desc_iter->TDES0.InterruptOnComplete = 1;
            /* Program size */
            desc_iter->TDES1.TransmitBuffer1Size = lastlen;
            /* copy data from the upper-layer stack buffer */
            memcpy((void *)(desc_iter->Buffer1Addr), buf + i * CONFIG_ETH_DMA_BUFFER_SIZE, lastlen);
            sentout += lastlen;
        } else {
            /* Program size */
            desc_iter->TDES1.TransmitBuffer1Size = CONFIG_ETH_DMA_BUFFER_SIZE;
            /* copy data from the upper-layer stack buffer */
            memcpy((void *)(desc_iter->Buffer1Addr), buf + i * CONFIG_ETH_DMA_BUFFER_SIZE, CONFIG_ETH_DMA_BUFFER_SIZE);
            sentout += CONFIG_ETH_DMA_BUFFER_SIZE;
        }
        /* Point to next descriptor */
        desc_iter = (eth_dma_tx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
    }

    /* Set Own bit of the Tx descriptors Status: gives the buffers back to the Ethernet DMA */
    for (size_t i = 0; i < bufcount; i++) {
        hal->tx_desc->TDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
        hal->tx_desc = (eth_dma_tx_descriptor_t *)(hal->tx_desc->Buffer2NextDescAddr);
    }
    emac_ll_transmit_poll_demand(hal->dma_regs, 0);
    return sentout;
err:
    return 0;
}

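/* Scatter-gather variant of emac_hal_transmit_frame(): packs an array of input buffers back-to-back
   into the Tx descriptor ring, so one descriptor may carry data from several input buffers and one
   input buffer may span several descriptors. */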
uint32_t emac_hal_transmit_multiple_buf_frame(emac_hal_context_t *hal, uint8_t **buffs, uint32_t *lengths, uint32_t buffs_cnt)
{
    /* Get the number of Tx buffers to use for the frame */
    uint32_t dma_bufcount = 0;
    uint32_t sentout = 0;
    uint8_t *ptr = buffs[0];
    uint32_t lastlen = lengths[0];
    uint32_t avail_len = CONFIG_ETH_DMA_BUFFER_SIZE;

    eth_dma_tx_descriptor_t *desc_iter = hal->tx_desc;
    /* A frame may be transmitted across multiple descriptors */
    while (dma_bufcount < CONFIG_ETH_DMA_TX_BUFFER_NUM) {
        /* Check if the descriptor is owned by the Ethernet DMA (when 1) or CPU (when 0) */
        if (desc_iter->TDES0.Own != EMAC_LL_DMADESC_OWNER_CPU) {
            goto err;
        }
        /* Clear FIRST and LAST segment bits */
        desc_iter->TDES0.FirstSegment = 0;
        desc_iter->TDES0.LastSegment = 0;
        desc_iter->TDES0.InterruptOnComplete = 0;
        desc_iter->TDES1.TransmitBuffer1Size = 0;
        if (dma_bufcount == 0) {
            /* Set the first segment bit */
            desc_iter->TDES0.FirstSegment = 1;
        }

        while (buffs_cnt > 0) {
            /* Check if the input buffer data fits into the currently available space in the descriptor */
            if (lastlen < avail_len) {
                /* copy data from the upper-layer stack buffer */
                memcpy((void *)(desc_iter->Buffer1Addr + (CONFIG_ETH_DMA_BUFFER_SIZE - avail_len)), ptr, lastlen);
                sentout += lastlen;
                avail_len -= lastlen;
                desc_iter->TDES1.TransmitBuffer1Size += lastlen;

                /* Update processed input buffers info */
                buffs_cnt--;
                ptr = *(++buffs);
                lastlen = *(++lengths);
            /* There is only limited available space in the current descriptor, use it all */
            } else {
                /* copy data from the upper-layer stack buffer */
                memcpy((void *)(desc_iter->Buffer1Addr + (CONFIG_ETH_DMA_BUFFER_SIZE - avail_len)), ptr, avail_len);
                sentout += avail_len;
                lastlen -= avail_len;
                /* If lastlen is not zero, the input buffer will be fragmented over multiple descriptors */
                if (lastlen > 0) {
                    ptr += avail_len;
                /* The input buffer fully fits the descriptor, move to the next input buffer */
                } else {
                    /* Update processed input buffers info */
                    buffs_cnt--;
                    ptr = *(++buffs);
                    lastlen = *(++lengths);
                }
                avail_len = CONFIG_ETH_DMA_BUFFER_SIZE;
                desc_iter->TDES1.TransmitBuffer1Size = CONFIG_ETH_DMA_BUFFER_SIZE;
                /* The descriptor is full here so exit and use the next descriptor */
                break;
            }
        }
        /* Increase counter of utilized DMA buffers */
        dma_bufcount++;

        /* If all input buffers are processed, mark this as the LAST segment and finish copying */
        if (buffs_cnt == 0) {
            /* Set the last segment bit */
            desc_iter->TDES0.LastSegment = 1;
            /* Enable transmit interrupt */
            desc_iter->TDES0.InterruptOnComplete = 1;
            break;
        }

        /* Point to next descriptor */
        desc_iter = (eth_dma_tx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
    }

    /* Set Own bit of the Tx descriptors Status: gives the buffers back to the Ethernet DMA */
    for (size_t i = 0; i < dma_bufcount; i++) {
        hal->tx_desc->TDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
        hal->tx_desc = (eth_dma_tx_descriptor_t *)(hal->tx_desc->Buffer2NextDescAddr);
    }
    emac_ll_transmit_poll_demand(hal->dma_regs, 0);
    return sentout;
err:
    return 0;
}

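/* Probe the Rx descriptor ring for the next complete frame and allocate a buffer for it. On entry,
   *size holds the caller's limit (larger frames are truncated); on exit, it holds the actual frame
   length. The allocated buffer begins with emac_hal_auto_buf_info_t so that emac_hal_receive_frame()
   can later recover the copy length. */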
uint8_t *emac_hal_alloc_recv_buf(emac_hal_context_t *hal, uint32_t *size)
{
    eth_dma_rx_descriptor_t *desc_iter = hal->rx_desc;
    uint32_t used_descs = 0;
    uint32_t ret_len = 0;
    uint32_t copy_len = 0;
    uint8_t *buf = NULL;

    /* Traverse descriptors owned by CPU */
    while ((desc_iter->RDES0.Own != EMAC_LL_DMADESC_OWNER_DMA) && (used_descs < CONFIG_ETH_DMA_RX_BUFFER_NUM)) {
        used_descs++;
        /* Last segment in frame */
        if (desc_iter->RDES0.LastDescriptor) {
            /* Get the frame length of the received packet: subtract the 4 CRC bytes */
            ret_len = desc_iter->RDES0.FrameLength - ETH_CRC_LENGTH;
            /* packets larger than expected will be truncated */
            copy_len = ret_len > *size ? *size : ret_len;
            break;
        }
        /* point to next descriptor */
        desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
    }
    if (copy_len > 0) {
        buf = malloc(copy_len);
        if (buf != NULL) {
            emac_hal_auto_buf_info_t *buff_info = (emac_hal_auto_buf_info_t *)buf;
            /* no need to check the allocated buffer's minimum length prior to writing, since we know the EMAC DMA is
               configured not to forward erroneous or undersized frames (less than 64 B), see emac_hal_init_dma_default */
#ifndef NDEBUG
            buff_info->magic_id = EMAC_HAL_BUF_MAGIC_ID;
#endif // NDEBUG
            buff_info->copy_len = copy_len;
        }
    }
    /* indicate actual size of received frame */
    *size = ret_len;
    return buf;
}

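/* Copy the next received frame into 'buf'. With an explicit 'size', the Rx ring is scanned and the
   frame is truncated to fit; with EMAC_HAL_BUF_SIZE_AUTO, the copy length is taken from the
   emac_hal_auto_buf_info_t header written by emac_hal_alloc_recv_buf(). Used descriptors are handed
   back to the DMA, and the number of remaining frames and free descriptors is reported. */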
uint32_t emac_hal_receive_frame(emac_hal_context_t *hal, uint8_t *buf, uint32_t size, uint32_t *frames_remain, uint32_t *free_desc)
{
    eth_dma_rx_descriptor_t *desc_iter = hal->rx_desc;
    eth_dma_rx_descriptor_t *first_desc = hal->rx_desc;
    uint32_t used_descs = 0;
    uint32_t ret_len = 0;
    uint32_t copy_len = 0;
    uint32_t frame_count = 0;

    if (size != EMAC_HAL_BUF_SIZE_AUTO) {
        /* Traverse descriptors owned by CPU */
        while ((desc_iter->RDES0.Own != EMAC_LL_DMADESC_OWNER_DMA) && (used_descs < CONFIG_ETH_DMA_RX_BUFFER_NUM) && !frame_count) {
            used_descs++;
            /* Last segment in frame */
            if (desc_iter->RDES0.LastDescriptor) {
                /* Get the frame length of the received packet: subtract the 4 CRC bytes */
                ret_len = desc_iter->RDES0.FrameLength - ETH_CRC_LENGTH;
                /* packets larger than expected will be truncated */
                copy_len = ret_len > size ? size : ret_len;
                /* update unhandled frame count */
                frame_count++;
            }
            /* First segment in frame */
            if (desc_iter->RDES0.FirstDescriptor) {
                first_desc = desc_iter;
            }
            /* point to next descriptor */
            desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
        }
    } else {
        emac_hal_auto_buf_info_t *buff_info = (emac_hal_auto_buf_info_t *)buf;
#ifndef NDEBUG
        /* check that the buffer was allocated by emac_hal_alloc_recv_buf */
        assert(buff_info->magic_id == EMAC_HAL_BUF_MAGIC_ID);
#endif // NDEBUG
        copy_len = buff_info->copy_len;
        ret_len = copy_len;
    }

    if (copy_len) {
        /* check how many frames are left to handle */
        while ((desc_iter->RDES0.Own != EMAC_LL_DMADESC_OWNER_DMA) && (used_descs < CONFIG_ETH_DMA_RX_BUFFER_NUM)) {
            used_descs++;
            if (desc_iter->RDES0.LastDescriptor) {
                frame_count++;
            }
            /* point to next descriptor */
            desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
        }
        desc_iter = first_desc;
        while (copy_len > CONFIG_ETH_DMA_BUFFER_SIZE) {
            used_descs--;
            memcpy(buf, (void *)(desc_iter->Buffer1Addr), CONFIG_ETH_DMA_BUFFER_SIZE);
            buf += CONFIG_ETH_DMA_BUFFER_SIZE;
            copy_len -= CONFIG_ETH_DMA_BUFFER_SIZE;
            /* Set Own bit in Rx descriptors: gives the buffers back to DMA */
            desc_iter->RDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
            desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
        }
        memcpy(buf, (void *)(desc_iter->Buffer1Addr), copy_len);
        desc_iter->RDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
        used_descs--;
        /* `copy_len` does not include the CRC, hence check if we have reached the last descriptor */
        while (!desc_iter->RDES0.LastDescriptor) {
            desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
            desc_iter->RDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
            used_descs--;
        }
        /* update rxdesc */
        hal->rx_desc = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
        /* poll rx demand */
        emac_ll_receive_poll_demand(hal->dma_regs, 0);
        frame_count--;
    }
    *frames_remain = frame_count;
    *free_desc = CONFIG_ETH_DMA_RX_BUFFER_NUM - used_descs;
    return ret_len;
}

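/* Drop the next received frame without copying it: return its length, hand its descriptors back to
   the DMA, and report the number of remaining complete frames and free descriptors. */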
uint32_t emac_hal_flush_recv_frame(emac_hal_context_t *hal, uint32_t *frames_remain, uint32_t *free_desc)
{
    eth_dma_rx_descriptor_t *desc_iter = hal->rx_desc;
    eth_dma_rx_descriptor_t *first_desc = hal->rx_desc;
    uint32_t used_descs = 0;
    uint32_t frame_len = 0;
    uint32_t frame_count = 0;

    /* Traverse descriptors owned by CPU */
    while ((desc_iter->RDES0.Own != EMAC_LL_DMADESC_OWNER_DMA) && (used_descs < CONFIG_ETH_DMA_RX_BUFFER_NUM) && !frame_count) {
        used_descs++;
        /* Last segment in frame */
        if (desc_iter->RDES0.LastDescriptor) {
            /* Get the frame length of the received packet: subtract the 4 CRC bytes */
            frame_len = desc_iter->RDES0.FrameLength - ETH_CRC_LENGTH;
            /* update unhandled frame count */
            frame_count++;
        }
        /* First segment in frame */
        if (desc_iter->RDES0.FirstDescriptor) {
            first_desc = desc_iter;
        }
        /* point to next descriptor */
        desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
    }

    /* if there is at least one frame waiting */
    if (frame_len) {
        /* check how many frames are left to handle */
        while ((desc_iter->RDES0.Own != EMAC_LL_DMADESC_OWNER_DMA) && (used_descs < CONFIG_ETH_DMA_RX_BUFFER_NUM)) {
            used_descs++;
            if (desc_iter->RDES0.LastDescriptor) {
                frame_count++;
            }
            /* point to next descriptor */
            desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
        }
        desc_iter = first_desc;
        /* return descriptors to DMA */
        while (!desc_iter->RDES0.LastDescriptor) {
            desc_iter->RDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
            desc_iter = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
            used_descs--;
        }
        desc_iter->RDES0.Own = EMAC_LL_DMADESC_OWNER_DMA;
        used_descs--;
        /* update rxdesc */
        hal->rx_desc = (eth_dma_rx_descriptor_t *)(desc_iter->Buffer2NextDescAddr);
        /* poll rx demand */
        emac_ll_receive_poll_demand(hal->dma_regs, 0);
        frame_count--;
    }
    *frames_remain = frame_count;
    *free_desc = CONFIG_ETH_DMA_RX_BUFFER_NUM - used_descs;
    return frame_len;
}