1 /*
2 * Copyright (c) 2022 Grant Ramsay <grant.ramsay@hotmail.com>
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT espressif_esp32_eth
8
9 #include <ethernet/eth_stats.h>
10 #include <zephyr/drivers/clock_control.h>
11 #include <zephyr/drivers/interrupt_controller/intc_esp32.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/net/ethernet.h>
14 #include <zephyr/net/phy.h>
15
16 #include <esp_attr.h>
17 #include <esp_mac.h>
18 #include <hal/emac_hal.h>
19 #include <hal/emac_ll.h>
20 #include <soc/rtc.h>
21
22 #include "eth.h"
23
24 LOG_MODULE_REGISTER(eth_esp32, CONFIG_ETHERNET_LOG_LEVEL);
25
26 #define MAC_RESET_TIMEOUT_MS 100
27
/*
 * DMA descriptors and frame buffers shared with the EMAC hardware.
 * The RX and TX descriptor rings are packed back to back inside
 * `descriptors` (split up by emac_hal_init()); each RX/TX buffer is one
 * CONFIG_ETH_DMA_BUFFER_SIZE slot. Must live in DRAM (see instance below).
 */
struct eth_esp32_dma_data {
	uint8_t descriptors[
		CONFIG_ETH_DMA_RX_BUFFER_NUM * sizeof(eth_dma_rx_descriptor_t) +
		CONFIG_ETH_DMA_TX_BUFFER_NUM * sizeof(eth_dma_tx_descriptor_t)];
	uint8_t rx_buf[CONFIG_ETH_DMA_RX_BUFFER_NUM][CONFIG_ETH_DMA_BUFFER_SIZE];
	uint8_t tx_buf[CONFIG_ETH_DMA_TX_BUFFER_NUM][CONFIG_ETH_DMA_BUFFER_SIZE];
};
35
/* Per-device runtime state for the ESP32 EMAC driver. */
struct eth_esp32_dev_data {
	/* Network interface bound in eth_esp32_iface_init() */
	struct net_if *iface;
	/* MAC address programmed into the EMAC (see generate_mac_addr()) */
	uint8_t mac_addr[6];
	/* ESP-IDF EMAC HAL context (register access handles) */
	emac_hal_context_t hal;
	/* DMA descriptors/buffers; points at the DRAM instance below */
	struct eth_esp32_dma_data *dma;
	/* Staging buffers for copying whole frames to/from DMA buffers */
	uint8_t txb[NET_ETH_MAX_FRAME_SIZE];
	uint8_t rxb[NET_ETH_MAX_FRAME_SIZE];
	/* Pointer arrays over dma->rx_buf/tx_buf, as required by emac_hal_init() */
	uint8_t *dma_rx_buf[CONFIG_ETH_DMA_RX_BUFFER_NUM];
	uint8_t *dma_tx_buf[CONFIG_ETH_DMA_TX_BUFFER_NUM];
	/* Given by the ISR on RX-complete, taken by the RX thread */
	struct k_sem int_sem;

	K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_ESP32_RX_THREAD_STACK_SIZE);
	struct k_thread rx_thread;
};
50
/* PHY device resolved from the instance's "phy-handle" devicetree property */
static const struct device *eth_esp32_phy_dev = DEVICE_DT_GET(
	DT_INST_PHANDLE(0, phy_handle));
53
eth_esp32_caps(const struct device * dev)54 static enum ethernet_hw_caps eth_esp32_caps(const struct device *dev)
55 {
56 ARG_UNUSED(dev);
57 return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T;
58 }
59
eth_esp32_send(const struct device * dev,struct net_pkt * pkt)60 static int eth_esp32_send(const struct device *dev, struct net_pkt *pkt)
61 {
62 struct eth_esp32_dev_data *dev_data = dev->data;
63 size_t len = net_pkt_get_len(pkt);
64
65 if (net_pkt_read(pkt, dev_data->txb, len)) {
66 return -EIO;
67 }
68
69 uint32_t sent_len = emac_hal_transmit_frame(&dev_data->hal, dev_data->txb, len);
70
71 int res = len == sent_len ? 0 : -EIO;
72
73 return res;
74 }
75
/*
 * Receive one frame from the EMAC DMA into a freshly allocated net_pkt.
 *
 * On return, *frames_remaining holds the number of complete frames still
 * pending in the DMA ring. Returns the allocated packet, or NULL when
 * there is nothing to receive or allocation/copy failed (RX error stats
 * are updated in the failure cases).
 */
static struct net_pkt *eth_esp32_rx(
	struct eth_esp32_dev_data *const dev_data, uint32_t *frames_remaining)
{
	uint32_t free_rx_descriptor;
	uint32_t receive_len = emac_hal_receive_frame(
		&dev_data->hal, dev_data->rxb, sizeof(dev_data->rxb),
		frames_remaining, &free_rx_descriptor);
	if (receive_len == 0) {
		/* Nothing to receive */
		return NULL;
	}

	struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer(
		dev_data->iface, receive_len, AF_UNSPEC, 0, K_MSEC(100));
	if (pkt == NULL) {
		/* Fix: was `ctx->iface` — `ctx` is not defined in this scope */
		eth_stats_update_errors_rx(dev_data->iface);
		LOG_ERR("Could not allocate rx buffer");
		return NULL;
	}

	if (net_pkt_write(pkt, dev_data->rxb, receive_len) != 0) {
		LOG_ERR("Unable to write frame into the pkt");
		eth_stats_update_errors_rx(dev_data->iface);
		net_pkt_unref(pkt);
		return NULL;
	}

	return pkt;
}
105
eth_esp32_rx_thread(void * arg1,void * arg2,void * arg3)106 FUNC_NORETURN static void eth_esp32_rx_thread(void *arg1, void *arg2, void *arg3)
107 {
108 const struct device *dev = arg1;
109 struct eth_esp32_dev_data *const dev_data = dev->data;
110
111 ARG_UNUSED(arg2);
112 ARG_UNUSED(arg3);
113
114 while (true) {
115 k_sem_take(&dev_data->int_sem, K_FOREVER);
116
117 uint32_t frames_remaining;
118
119 do {
120 struct net_pkt *pkt = eth_esp32_rx(
121 dev_data, &frames_remaining);
122 if (pkt == NULL) {
123 break;
124 }
125
126 if (net_recv_data(dev_data->iface, pkt) < 0) {
127 /* Upper layers are not ready to receive packets */
128 net_pkt_unref(pkt);
129 }
130 } while (frames_remaining > 0);
131 }
132 }
133
eth_esp32_isr(void * arg)134 IRAM_ATTR static void eth_esp32_isr(void *arg)
135 {
136 const struct device *dev = arg;
137 struct eth_esp32_dev_data *const dev_data = dev->data;
138 uint32_t intr_stat = emac_ll_get_intr_status(dev_data->hal.dma_regs);
139
140 emac_ll_clear_corresponding_intr(dev_data->hal.dma_regs, intr_stat);
141
142 if (intr_stat & EMAC_LL_DMA_RECEIVE_FINISH_INTR) {
143 k_sem_give(&dev_data->int_sem);
144 }
145 }
146
/*
 * Fill mac_addr[6] according to the devicetree configuration:
 *  - zephyr,random-mac-address: random address with Espressif OUI 24:D7:EB
 *  - local-mac-address: fixed address from devicetree
 *  - otherwise: the factory Ethernet MAC from eFuse via esp_read_mac()
 *
 * Returns 0 on success, -EIO if the eFuse MAC could not be read.
 */
static int generate_mac_addr(uint8_t mac_addr[6])
{
	int res = 0;
#if DT_INST_PROP(0, zephyr_random_mac_address)
	gen_random_mac(mac_addr, 0x24, 0xD7, 0xEB);
#elif NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))
	static const uint8_t addr[6] = DT_INST_PROP(0, local_mac_address);

	memcpy(mac_addr, addr, sizeof(addr));
#else
	if (esp_read_mac(mac_addr, ESP_MAC_ETH) != ESP_OK) {
		res = -EIO;
	}
#endif
	return res;
}
163
phy_link_state_changed(const struct device * phy_dev,struct phy_link_state * state,void * user_data)164 static void phy_link_state_changed(const struct device *phy_dev,
165 struct phy_link_state *state,
166 void *user_data)
167 {
168 const struct device *dev = (const struct device *)user_data;
169 struct eth_esp32_dev_data *const dev_data = dev->data;
170
171 ARG_UNUSED(phy_dev);
172
173 if (state->is_up) {
174 net_eth_carrier_on(dev_data->iface);
175 } else {
176 net_eth_carrier_off(dev_data->iface);
177 }
178 }
179
/*
 * One-time driver initialization:
 *  1. enable the EMAC clock,
 *  2. wire up DMA buffers and initialize the EMAC HAL,
 *  3. install the ISR,
 *  4. configure the MII/RMII pinmux and clocking,
 *  5. reset the MAC and wait for completion,
 *  6. program the MAC address and start the RX thread.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int eth_esp32_initialize(const struct device *dev)
{
	struct eth_esp32_dev_data *const dev_data = dev->data;
	int res;

	k_sem_init(&dev_data->int_sem, 0, 1);

	/* Enable the EMAC peripheral clock */
	const struct device *clock_dev =
		DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_NODELABEL(eth)));
	clock_control_subsys_t clock_subsys =
		(clock_control_subsys_t)DT_CLOCKS_CELL(DT_NODELABEL(eth), offset);

	res = clock_control_on(clock_dev, clock_subsys);
	if (res != 0) {
		goto err;
	}

	/* Convert 2D array DMA buffers to arrays of pointers */
	for (int i = 0; i < CONFIG_ETH_DMA_RX_BUFFER_NUM; i++) {
		dev_data->dma_rx_buf[i] = dev_data->dma->rx_buf[i];
	}
	for (int i = 0; i < CONFIG_ETH_DMA_TX_BUFFER_NUM; i++) {
		dev_data->dma_tx_buf[i] = dev_data->dma->tx_buf[i];
	}

	emac_hal_init(&dev_data->hal, dev_data->dma->descriptors,
		      dev_data->dma_rx_buf, dev_data->dma_tx_buf);

	/* Configure ISR (IRAM so it can run while flash cache is disabled) */
	res = esp_intr_alloc(DT_IRQN(DT_NODELABEL(eth)),
			     ESP_INTR_FLAG_IRAM,
			     eth_esp32_isr,
			     (void *)dev,
			     NULL);
	if (res != 0) {
		goto err;
	}

	/* Configure phy for Media-Independent Interface (MII) or
	 * Reduced Media-Independent Interface (RMII) mode
	 */
	const char *phy_connection_type = DT_INST_PROP_OR(0,
							  phy_connection_type,
							  "rmii");

	if (strcmp(phy_connection_type, "rmii") == 0) {
		emac_hal_iomux_init_rmii();
#if DT_INST_NODE_HAS_PROP(0, ref_clk_output_gpios)
		BUILD_ASSERT(DT_INST_GPIO_PIN(0, ref_clk_output_gpios) == 16 ||
			     DT_INST_GPIO_PIN(0, ref_clk_output_gpios) == 17,
			     "Only GPIO16/17 are allowed as a GPIO REF_CLK source!");
		int ref_clk_gpio = DT_INST_GPIO_PIN(0, ref_clk_output_gpios);

		/* Output the 50 MHz RMII reference clock, derived from the APLL */
		emac_hal_iomux_rmii_clk_output(ref_clk_gpio);
		emac_ll_clock_enable_rmii_output(dev_data->hal.ext_regs);
		rtc_clk_apll_enable(true, 0, 0, 6, 2);
#else
		/* External 50 MHz RMII reference clock fed into the SoC */
		emac_hal_iomux_rmii_clk_input();
		emac_ll_clock_enable_rmii_input(dev_data->hal.ext_regs);
#endif
	} else if (strcmp(phy_connection_type, "mii") == 0) {
		emac_hal_iomux_init_mii();
		emac_ll_clock_enable_mii(dev_data->hal.ext_regs);
	} else {
		res = -EINVAL;
		goto err;
	}

	/* Reset mac registers and wait until ready */
	emac_ll_reset(dev_data->hal.dma_regs);
	bool reset_success = false;

	for (uint32_t t_ms = 0; t_ms < MAC_RESET_TIMEOUT_MS; t_ms += 10) {
		/* Busy wait rather than sleep in case kernel is not yet initialized */
		k_busy_wait(10 * 1000);
		if (emac_ll_is_reset_done(dev_data->hal.dma_regs)) {
			reset_success = true;
			break;
		}
	}
	if (!reset_success) {
		res = -ETIMEDOUT;
		goto err;
	}

	emac_hal_reset_desc_chain(&dev_data->hal);
	emac_hal_init_mac_default(&dev_data->hal);
	emac_hal_init_dma_default(&dev_data->hal);

	res = generate_mac_addr(dev_data->mac_addr);
	if (res != 0) {
		goto err;
	}
	emac_hal_set_address(&dev_data->hal, dev_data->mac_addr);

	/* Spawn the thread that drains the RX DMA ring (woken by the ISR) */
	k_tid_t tid = k_thread_create(
		&dev_data->rx_thread, dev_data->rx_thread_stack,
		K_KERNEL_STACK_SIZEOF(dev_data->rx_thread_stack),
		eth_esp32_rx_thread,
		(void *)dev, NULL, NULL,
		CONFIG_ETH_ESP32_RX_THREAD_PRIORITY,
		K_ESSENTIAL, K_NO_WAIT);
	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		k_thread_name_set(tid, "esp32_eth");
	}

	emac_hal_start(&dev_data->hal);

	return 0;

err:
	return res;
}
293
eth_esp32_iface_init(struct net_if * iface)294 static void eth_esp32_iface_init(struct net_if *iface)
295 {
296 const struct device *dev = net_if_get_device(iface);
297 struct eth_esp32_dev_data *dev_data = dev->data;
298
299 dev_data->iface = iface;
300
301 net_if_set_link_addr(iface, dev_data->mac_addr,
302 sizeof(dev_data->mac_addr),
303 NET_LINK_ETHERNET);
304
305 ethernet_init(iface);
306
307 if (device_is_ready(eth_esp32_phy_dev)) {
308 phy_link_callback_set(eth_esp32_phy_dev, phy_link_state_changed,
309 (void *)dev);
310 } else {
311 LOG_ERR("PHY device not ready");
312 }
313
314 /* Do not start the interface until PHY link is up */
315 net_if_carrier_off(iface);
316 }
317
/* Ethernet driver API vtable registered with the network device below */
static const struct ethernet_api eth_esp32_api = {
	.iface_api.init = eth_esp32_iface_init,
	.get_capabilities = eth_esp32_caps,
	.send = eth_esp32_send,
};
323
/* DMA data must be in DRAM (the EMAC DMA engine cannot access flash/IRAM) */
static struct eth_esp32_dma_data eth_esp32_dma_data WORD_ALIGNED_ATTR DRAM_ATTR;
326
327 static struct eth_esp32_dev_data eth_esp32_dev = {
328 .dma = ð_esp32_dma_data,
329 };
330
331 ETH_NET_DEVICE_DT_INST_DEFINE(0,
332 eth_esp32_initialize,
333 NULL,
334 ð_esp32_dev,
335 NULL,
336 CONFIG_ETH_INIT_PRIORITY,
337 ð_esp32_api,
338 NET_ETH_MTU);
339