1 /*
2 * Copyright (c) 2019 Interay Solutions B.V.
3 * Copyright (c) 2019 Oane Kingma
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #define DT_DRV_COMPAT silabs_gecko_ethernet
9
10 /* Silicon Labs EFM32 Giant Gecko 11 Ethernet driver.
11 * Limitations:
12 * - no link monitoring through PHY interrupt
13 */
14
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_REGISTER(eth_gecko, CONFIG_ETHERNET_LOG_LEVEL);
17
18 #include <soc.h>
19 #include <zephyr/device.h>
20 #include <zephyr/init.h>
21 #include <zephyr/kernel.h>
22 #include <errno.h>
23 #include <zephyr/net/net_pkt.h>
24 #include <zephyr/net/net_if.h>
25 #include <zephyr/net/ethernet.h>
26 #include <ethernet/eth_stats.h>
27 #include <em_cmu.h>
28 #include <zephyr/irq.h>
29
30 #include "phy_gecko.h"
31 #include "eth_gecko_priv.h"
32
33 #include "eth.h"
34
/* DMA transmit buffers, one per TX descriptor, aligned for the DMA engine */
static uint8_t dma_tx_buffer[ETH_TX_BUF_COUNT][ETH_TX_BUF_SIZE]
	__aligned(ETH_BUF_ALIGNMENT);
/* DMA receive buffers, one per RX descriptor, aligned for the DMA engine */
static uint8_t dma_rx_buffer[ETH_RX_BUF_COUNT][ETH_RX_BUF_SIZE]
	__aligned(ETH_BUF_ALIGNMENT);
/* TX/RX descriptor rings handed to the MAC via TXQPTR/RXQPTR */
static struct eth_buf_desc dma_tx_desc_tab[ETH_TX_BUF_COUNT]
	__aligned(ETH_DESC_ALIGNMENT);
static struct eth_buf_desc dma_rx_desc_tab[ETH_RX_BUF_COUNT]
	__aligned(ETH_DESC_ALIGNMENT);
/* Next TX descriptor to fill / next RX descriptor to process */
static uint32_t tx_buf_idx;
static uint32_t rx_buf_idx;
45
46
/* Program the negotiated duplex/speed bits into the MAC.
 *
 * The receiver and transmitter are disabled while NETWORKCFG is
 * rewritten, then re-enabled, so the MAC never runs with a
 * half-updated configuration.
 */
static void link_configure(ETH_TypeDef *eth, uint32_t flags)
{
	const uint32_t link_mask = _ETH_NETWORKCFG_FULLDUPLEX_MASK |
				   _ETH_NETWORKCFG_SPEED_MASK;
	uint32_t cfg;

	__ASSERT_NO_MSG(eth != NULL);

	/* Disable receiver & transmitter */
	eth->NETWORKCTRL &= ~(ETH_NETWORKCTRL_ENBTX | ETH_NETWORKCTRL_ENBRX);

	/* Set duplex mode and speed */
	cfg = eth->NETWORKCFG;
	cfg = (cfg & ~link_mask) | (flags & link_mask);
	eth->NETWORKCFG = cfg;

	/* Enable transmitter and receiver */
	eth->NETWORKCTRL |= (ETH_NETWORKCTRL_ENBTX | ETH_NETWORKCTRL_ENBRX);
}
66
eth_gecko_setup_mac(const struct device * dev)67 static void eth_gecko_setup_mac(const struct device *dev)
68 {
69 const struct eth_gecko_dev_cfg *const cfg = dev->config;
70 ETH_TypeDef *eth = cfg->regs;
71 uint32_t link_status;
72 int result;
73
74 /* PHY auto-negotiate link parameters */
75 result = phy_gecko_auto_negotiate(&cfg->phy, &link_status);
76 if (result < 0) {
77 LOG_ERR("ETH PHY auto-negotiate sequence failed");
78 return;
79 }
80
81 LOG_INF("Speed %s Mb",
82 link_status & ETH_NETWORKCFG_SPEED ? "100" : "10");
83 LOG_INF("%s duplex",
84 link_status & ETH_NETWORKCFG_FULLDUPLEX ? "Full" : "Half");
85
86 /* Set up link parameters and enable receiver/transmitter */
87 link_configure(eth, link_status);
88 }
89
eth_init_tx_buf_desc(void)90 static void eth_init_tx_buf_desc(void)
91 {
92 uint32_t address;
93 int i;
94
95 /* Initialize TX buffer descriptors */
96 for (i = 0; i < ETH_TX_BUF_COUNT; i++) {
97 address = (uint32_t) dma_tx_buffer[i];
98 dma_tx_desc_tab[i].address = address;
99 dma_tx_desc_tab[i].status = ETH_TX_USED;
100 }
101
102 /* Mark last descriptor entry with wrap flag */
103 dma_tx_desc_tab[i - 1].status |= ETH_TX_WRAP;
104 tx_buf_idx = 0;
105 }
106
eth_init_rx_buf_desc(void)107 static void eth_init_rx_buf_desc(void)
108 {
109 uint32_t address;
110 int i;
111
112 for (i = 0; i < ETH_RX_BUF_COUNT; i++) {
113 address = (uint32_t) dma_rx_buffer[i];
114 dma_rx_desc_tab[i].address = address & ETH_RX_ADDRESS;
115 dma_rx_desc_tab[i].status = 0;
116 }
117
118 /* Mark last descriptor entry with wrap flag */
119 dma_rx_desc_tab[i - 1].address |= ETH_RX_WRAP;
120 rx_buf_idx = 0;
121 }
122
/* Recover from a receive error: stop the receiver, rebuild the RX
 * descriptor ring from scratch (any pending frames are dropped) and
 * restart reception.
 */
static void rx_error_handler(ETH_TypeDef *eth)
{
	__ASSERT_NO_MSG(eth != NULL);

	/* Stop reception */
	ETH_RX_DISABLE(eth);

	/* Reset RX buffer descriptor list */
	eth_init_rx_buf_desc();
	eth->RXQPTR = (uint32_t)dma_rx_desc_tab;

	/* Restart reception */
	ETH_RX_ENABLE(eth);
}
137
/* Assemble one complete received frame from the RX descriptor ring.
 *
 * Walks software-owned descriptors starting at rx_buf_idx looking for a
 * SOF..EOF sequence, allocates a net_pkt of the accumulated length, and
 * copies each fragment into it, returning descriptor ownership to the
 * MAC as fragments are consumed.
 *
 * Returns the assembled packet, or NULL when no complete frame is
 * available yet. On allocation failure the whole RX ring is reset (all
 * pending frames dropped); on copy failure the partial frame is dropped.
 */
static struct net_pkt *frame_get(const struct device *dev)
{
	struct eth_gecko_dev_data *const dev_data = dev->data;
	const struct eth_gecko_dev_cfg *const cfg = dev->config;
	ETH_TypeDef *eth = cfg->regs;
	struct net_pkt *rx_frame = NULL;
	uint16_t frag_len, total_len;
	uint32_t sofIdx, eofIdx;
	uint32_t i, j;

	__ASSERT_NO_MSG(dev != NULL);
	__ASSERT_NO_MSG(dev_data != NULL);
	__ASSERT_NO_MSG(cfg != NULL);

	/* Preset indices and total frame length */
	sofIdx = UINT32_MAX;
	eofIdx = UINT32_MAX;
	total_len = 0;

	/* Check if a full frame is received (SOF/EOF present)
	 * and determine total length of frame
	 */
	for (i = 0; i < ETH_RX_BUF_COUNT; i++) {
		/* Ring index, wrapped manually */
		j = (i + rx_buf_idx);
		if (j >= ETH_RX_BUF_COUNT) {
			j -= ETH_RX_BUF_COUNT;
		}

		/* Verify it is an ETH owned buffer */
		if (!(dma_rx_desc_tab[j].address & ETH_RX_OWNERSHIP)) {
			/* No more ETH owned buffers to process */
			break;
		}

		/* Check for SOF */
		if (dma_rx_desc_tab[j].status & ETH_RX_SOF) {
			sofIdx = j;
		}

		if (sofIdx != UINT32_MAX) {
			/* Inside a frame: accumulate fragment length */
			total_len += (dma_rx_desc_tab[j].status &
				ETH_RX_LENGTH);

			/* Check for EOF */
			if (dma_rx_desc_tab[j].status & ETH_RX_EOF) {
				eofIdx = j;
				break;
			}
		}
	}

	LOG_DBG("sof/eof: %u/%u, rx_buf_idx: %u, len: %u", sofIdx, eofIdx,
		rx_buf_idx, total_len);

	/* Verify we found a full frame */
	if (eofIdx != UINT32_MAX) {
		/* Allocate room for full frame */
		rx_frame = net_pkt_rx_alloc_with_buffer(dev_data->iface,
			total_len, AF_UNSPEC, 0, K_NO_WAIT);
		if (!rx_frame) {
			/* Out of packet buffers: reset the whole RX ring
			 * so the MAC can continue receiving.
			 */
			LOG_ERR("Failed to obtain RX buffer");
			ETH_RX_DISABLE(eth);
			eth_init_rx_buf_desc();
			eth->RXQPTR = (uint32_t)dma_rx_desc_tab;
			ETH_RX_ENABLE(eth);
			return rx_frame;
		}

		/* Copy frame (fragments)*/
		j = sofIdx;
		while (total_len) {
			frag_len = MIN(total_len, ETH_RX_BUF_SIZE);
			LOG_DBG("frag: %u, fraglen: %u, rx_buf_idx: %u", j,
				frag_len, rx_buf_idx);
			if (net_pkt_write(rx_frame, &dma_rx_buffer[j],
					  frag_len) < 0) {
				/* Drop the partial frame; hand the current
				 * descriptor back to the MAC first.
				 */
				LOG_ERR("Failed to append RX buffer");
				dma_rx_desc_tab[j].address &=
					~ETH_RX_OWNERSHIP;
				net_pkt_unref(rx_frame);
				rx_frame = NULL;
				break;
			}

			/* Return descriptor ownership to the MAC */
			dma_rx_desc_tab[j].address &= ~ETH_RX_OWNERSHIP;

			total_len -= frag_len;
			if (++j >= ETH_RX_BUF_COUNT) {
				j -= ETH_RX_BUF_COUNT;
			}

			if (++rx_buf_idx >= ETH_RX_BUF_COUNT) {
				rx_buf_idx -= ETH_RX_BUF_COUNT;
			}
		}
	}

	return rx_frame;
}
237
eth_rx(const struct device * dev)238 static void eth_rx(const struct device *dev)
239 {
240 struct eth_gecko_dev_data *const dev_data = dev->data;
241 struct net_pkt *rx_frame;
242 int res = 0;
243
244 __ASSERT_NO_MSG(dev != NULL);
245 __ASSERT_NO_MSG(dev_data != NULL);
246
247 /* Iterate across (possibly multiple) frames */
248 rx_frame = frame_get(dev);
249 while (rx_frame) {
250 /* All data for this frame received */
251 res = net_recv_data(dev_data->iface, rx_frame);
252 if (res < 0) {
253 LOG_ERR("Failed to enqueue frame into RX queue: %d",
254 res);
255 eth_stats_update_errors_rx(dev_data->iface);
256 net_pkt_unref(rx_frame);
257 }
258
259 /* Check if more frames are received */
260 rx_frame = frame_get(dev);
261 }
262 }
263
eth_tx(const struct device * dev,struct net_pkt * pkt)264 static int eth_tx(const struct device *dev, struct net_pkt *pkt)
265 {
266 struct eth_gecko_dev_data *const dev_data = dev->data;
267 const struct eth_gecko_dev_cfg *const cfg = dev->config;
268 ETH_TypeDef *eth = cfg->regs;
269 uint16_t total_len;
270 uint8_t *dma_buffer;
271 int res = 0;
272
273 __ASSERT_NO_MSG(dev != NULL);
274 __ASSERT_NO_MSG(dev_data != NULL);
275 __ASSERT_NO_MSG(cfg != NULL);
276
277 __ASSERT(pkt, "Buf pointer is NULL");
278 __ASSERT(pkt->frags, "Frame data missing");
279
280 /* Determine length of frame */
281 total_len = net_pkt_get_len(pkt);
282 if (total_len > ETH_TX_BUF_SIZE) {
283 LOG_ERR("PKT to big");
284 res = -EIO;
285 goto error;
286 }
287
288 if (k_sem_take(&dev_data->tx_sem, K_MSEC(100)) != 0) {
289 LOG_ERR("TX process did not complete within 100ms");
290 res = -EIO;
291 goto error;
292 }
293
294 /* Make sure current buffer is available for writing */
295 if (!(dma_tx_desc_tab[tx_buf_idx].status & ETH_TX_USED)) {
296 LOG_ERR("Buffer already in use");
297 res = -EIO;
298 goto error;
299 }
300
301 dma_buffer = (uint8_t *)dma_tx_desc_tab[tx_buf_idx].address;
302 if (net_pkt_read(pkt, dma_buffer, total_len)) {
303 LOG_ERR("Failed to read packet into buffer");
304 res = -EIO;
305 goto error;
306 }
307
308 if (tx_buf_idx < (ETH_TX_BUF_COUNT - 1)) {
309 dma_tx_desc_tab[tx_buf_idx].status =
310 (total_len & ETH_TX_LENGTH) | ETH_TX_LAST;
311 tx_buf_idx++;
312 } else {
313 dma_tx_desc_tab[tx_buf_idx].status =
314 (total_len & ETH_TX_LENGTH) | (ETH_TX_LAST |
315 ETH_TX_WRAP);
316 tx_buf_idx = 0;
317 }
318
319 /* Kick off transmission */
320 eth->NETWORKCTRL |= ETH_NETWORKCTRL_TXSTRT;
321
322 error:
323 return res;
324 }
325
rx_thread(void * arg1,void * unused1,void * unused2)326 static void rx_thread(void *arg1, void *unused1, void *unused2)
327 {
328 const struct device *dev = (const struct device *)arg1;
329 struct eth_gecko_dev_data *const dev_data = dev->data;
330 const struct eth_gecko_dev_cfg *const cfg = dev->config;
331 int res;
332
333 __ASSERT_NO_MSG(arg1 != NULL);
334 ARG_UNUSED(unused1);
335 ARG_UNUSED(unused2);
336 __ASSERT_NO_MSG(dev_data != NULL);
337 __ASSERT_NO_MSG(cfg != NULL);
338
339 while (1) {
340 res = k_sem_take(&dev_data->rx_sem, K_MSEC(
341 CONFIG_ETH_GECKO_CARRIER_CHECK_RX_IDLE_TIMEOUT_MS));
342 if (res == 0) {
343 if (dev_data->link_up != true) {
344 dev_data->link_up = true;
345 LOG_INF("Link up");
346 eth_gecko_setup_mac(dev);
347 net_eth_carrier_on(dev_data->iface);
348 }
349
350 /* Process received data */
351 eth_rx(dev);
352 } else if (res == -EAGAIN) {
353 if (phy_gecko_is_linked(&cfg->phy)) {
354 if (dev_data->link_up != true) {
355 dev_data->link_up = true;
356 LOG_INF("Link up");
357 eth_gecko_setup_mac(dev);
358 net_eth_carrier_on(dev_data->iface);
359 }
360 } else {
361 if (dev_data->link_up != false) {
362 dev_data->link_up = false;
363 LOG_INF("Link down");
364 net_eth_carrier_off(dev_data->iface);
365 }
366 }
367 }
368 }
369 }
370
/* Ethernet interrupt service routine.
 *
 * Reads the interrupt flag register once, signals the RX thread on
 * receive completion (or resets the RX ring on a receive error),
 * releases the TX semaphore on any transmit event, and finally clears
 * the handled interrupt bits by writing them back to IFCR.
 */
static void eth_isr(const struct device *dev)
{
	struct eth_gecko_dev_data *const dev_data = dev->data;
	const struct eth_gecko_dev_cfg *const cfg = dev->config;
	ETH_TypeDef *eth = cfg->regs;
	uint32_t int_clr = 0;
	uint32_t int_stat = eth->IFCR;
	uint32_t tx_irq_mask = (ETH_IENS_TXCMPLT | ETH_IENS_TXUNDERRUN |
				ETH_IENS_RTRYLMTORLATECOL |
				ETH_IENS_TXUSEDBITREAD |
				ETH_IENS_AMBAERR);
	uint32_t rx_irq_mask = (ETH_IENS_RXCMPLT | ETH_IENS_RXUSEDBITREAD);

	__ASSERT_NO_MSG(dev_data != NULL);
	__ASSERT_NO_MSG(cfg != NULL);

	/* Receive handling */
	if (int_stat & rx_irq_mask) {
		if (int_stat & ETH_IENS_RXCMPLT) {
			/* Receive complete */
			k_sem_give(&dev_data->rx_sem);
		} else {
			/* Receive error */
			LOG_DBG("RX Error");
			rx_error_handler(eth);
		}

		int_clr |= rx_irq_mask;
	}

	/* Transmit handling */
	if (int_stat & tx_irq_mask) {
		if (int_stat & ETH_IENS_TXCMPLT) {
			/* Transmit complete */
		} else {
			/* Transmit error: no actual handling, the current
			 * buffer is no longer used and we release the
			 * semaphore which signals the user thread to
			 * start TX of a new packet
			 */
		}

		int_clr |= tx_irq_mask;

		/* Signal TX thread we're ready to start transmission */
		k_sem_give(&dev_data->tx_sem);
	}

	/* Clear interrupts */
	eth->IFCR = int_clr;
}
422
/* Enable the high-frequency peripheral clock and the Ethernet
 * peripheral clock required by the MAC.
 */
static void eth_init_clocks(const struct device *dev)
{
	__ASSERT_NO_MSG(dev != NULL);

	CMU_ClockEnable(cmuClock_HFPER, true);
	CMU_ClockEnable(cmuClock_ETH, true);
}
430
/* Configure the GPIO pins used by the MAC and route them to the
 * peripheral. RMII and MDIO pin groups are only set up when the
 * corresponding devicetree location properties exist.
 */
static void eth_init_pins(const struct device *dev)
{
	const struct eth_gecko_dev_cfg *const cfg = dev->config;
	ETH_TypeDef *eth = cfg->regs;
	uint32_t idx;

	__ASSERT_NO_MSG(dev != NULL);
	__ASSERT_NO_MSG(cfg != NULL);

	/* Start from a clean routing configuration */
	eth->ROUTELOC1 = 0;
	eth->ROUTEPEN = 0;

#if DT_INST_NODE_HAS_PROP(0, location_rmii)
	for (idx = 0; idx < ARRAY_SIZE(cfg->pin_list->rmii); idx++) {
		GPIO_PinModeSet(cfg->pin_list->rmii[idx].port, cfg->pin_list->rmii[idx].pin,
				cfg->pin_list->rmii[idx].mode, cfg->pin_list->rmii[idx].out);
	}

	eth->ROUTELOC1 |= (DT_INST_PROP(0, location_rmii) <<
			   _ETH_ROUTELOC1_RMIILOC_SHIFT);
	eth->ROUTEPEN |= ETH_ROUTEPEN_RMIIPEN;
#endif

#if DT_INST_NODE_HAS_PROP(0, location_mdio)
	for (idx = 0; idx < ARRAY_SIZE(cfg->pin_list->mdio); idx++) {
		GPIO_PinModeSet(cfg->pin_list->mdio[idx].port, cfg->pin_list->mdio[idx].pin,
				cfg->pin_list->mdio[idx].mode, cfg->pin_list->mdio[idx].out);
	}

	eth->ROUTELOC1 |= (DT_INST_PROP(0, location_mdio) <<
			   _ETH_ROUTELOC1_MDIOLOC_SHIFT);
	eth->ROUTEPEN |= ETH_ROUTEPEN_MDIOPEN;
#endif

}
466
/* One-time driver initialization: enable clocks, route pins, select
 * RMII mode (when configured in devicetree) and connect the IRQ.
 * Network-facing setup happens later in eth_iface_init().
 *
 * Always returns 0.
 */
static int eth_init(const struct device *dev)
{
	const struct eth_gecko_dev_cfg *const cfg = dev->config;
	ETH_TypeDef *eth = cfg->regs;

	__ASSERT_NO_MSG(dev != NULL);
	__ASSERT_NO_MSG(cfg != NULL);

	/* Enable clocks */
	eth_init_clocks(dev);

	/* Connect pins to peripheral */
	eth_init_pins(dev);

#if DT_INST_NODE_HAS_PROP(0, location_rmii)
	/* Enable global clock and RMII operation */
	eth->CTRL = ETH_CTRL_GBLCLKEN | ETH_CTRL_MIISEL_RMII;
#endif

	/* Connect and enable IRQ */
	cfg->config_func();

	LOG_INF("Device %s initialized", dev->name);

	return 0;
}
493
/* Populate the interface MAC address.
 *
 * - zephyr,random-mac-address set: random MAC with the Silabs OUI.
 * - no valid local-mac-address in devicetree: derive the address from
 *   the device-unique EUI-48 stored in the DEVINFO block.
 * - otherwise mac_addr is left untouched (eth0_data is statically
 *   initialized from the devicetree local-mac-address property).
 */
static void generate_mac(uint8_t mac_addr[6])
{
#if DT_INST_PROP(0, zephyr_random_mac_address)
	gen_random_mac(mac_addr, SILABS_OUI_B0, SILABS_OUI_B1, SILABS_OUI_B2);
#elif !NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))
	mac_addr[0] = DEVINFO->EUI48H >> 8;
	mac_addr[1] = DEVINFO->EUI48H >> 0;
	mac_addr[2] = DEVINFO->EUI48L >> 24;
	mac_addr[3] = DEVINFO->EUI48L >> 16;
	mac_addr[4] = DEVINFO->EUI48L >> 8;
	mac_addr[5] = DEVINFO->EUI48L >> 0;
#endif
}
507
/* Network interface initialization: set the MAC address, program the
 * MAC's address filters, DMA descriptors, interrupt enables and network
 * configuration, initialize the PHY, and start the RX/carrier thread.
 *
 * The register writes follow the hardware bring-up order: everything is
 * configured while NETWORKCTRL is cleared (RX/TX disabled); RX/TX are
 * only enabled later by link_configure() once the link is negotiated.
 */
static void eth_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_gecko_dev_data *const dev_data = dev->data;
	const struct eth_gecko_dev_cfg *const cfg = dev->config;
	ETH_TypeDef *eth = cfg->regs;
	int result;

	__ASSERT_NO_MSG(iface != NULL);
	__ASSERT_NO_MSG(dev != NULL);
	__ASSERT_NO_MSG(dev_data != NULL);
	__ASSERT_NO_MSG(cfg != NULL);

	LOG_DBG("eth_initialize");

	dev_data->iface = iface;
	dev_data->link_up = false;
	ethernet_init(iface);

	/* Carrier stays off until the rx_thread detects a link */
	net_if_carrier_off(iface);

	/* Generate MAC address, possibly used for filtering */
	generate_mac(dev_data->mac_addr);

	/* Set link address */
	LOG_DBG("MAC %02x:%02x:%02x:%02x:%02x:%02x",
		dev_data->mac_addr[0], dev_data->mac_addr[1],
		dev_data->mac_addr[2], dev_data->mac_addr[3],
		dev_data->mac_addr[4], dev_data->mac_addr[5]);

	net_if_set_link_addr(iface, dev_data->mac_addr,
			     sizeof(dev_data->mac_addr), NET_LINK_ETHERNET);

	/* Disable transmit and receive circuits */
	eth->NETWORKCTRL = 0;
	eth->NETWORKCFG = 0;

	/* Filtering MAC addresses: specific-address register 1 holds our
	 * MAC (writing BOTTOM then TOP activates the filter entry)
	 */
	eth->SPECADDR1BOTTOM =
		(dev_data->mac_addr[0] << 0) |
		(dev_data->mac_addr[1] << 8) |
		(dev_data->mac_addr[2] << 16) |
		(dev_data->mac_addr[3] << 24);
	eth->SPECADDR1TOP =
		(dev_data->mac_addr[4] << 0) |
		(dev_data->mac_addr[5] << 8);

	/* Deactivate the remaining specific-address filter entries */
	eth->SPECADDR2BOTTOM = 0;
	eth->SPECADDR3BOTTOM = 0;
	eth->SPECADDR4BOTTOM = 0;

	/* Initialise hash table */
	eth->HASHBOTTOM = 0;
	eth->HASHTOP = 0;

	/* Initialise DMA buffers */
	eth_init_tx_buf_desc();
	eth_init_rx_buf_desc();

	/* Point to locations of TX/RX DMA descriptor lists */
	eth->TXQPTR = (uint32_t)dma_tx_desc_tab;
	eth->RXQPTR = (uint32_t)dma_rx_desc_tab;

	/* DMA RX size configuration (RXBUFSIZE is in units of 64 bytes) */
	eth->DMACFG = (eth->DMACFG & ~_ETH_DMACFG_RXBUFSIZE_MASK) |
		      ((ETH_RX_BUF_SIZE / 64) << _ETH_DMACFG_RXBUFSIZE_SHIFT);

	/* Clear status/interrupt registers */
	eth->IFCR |= _ETH_IFCR_MASK;
	eth->TXSTATUS = ETH_TXSTATUS_TXUNDERRUN | ETH_TXSTATUS_TXCMPLT |
			ETH_TXSTATUS_AMBAERR | ETH_TXSTATUS_TXGO |
			ETH_TXSTATUS_RETRYLMTEXCD | ETH_TXSTATUS_COLOCCRD |
			ETH_TXSTATUS_USEDBITREAD;
	eth->RXSTATUS = ETH_RXSTATUS_RESPNOTOK | ETH_RXSTATUS_RXOVERRUN |
			ETH_RXSTATUS_FRMRX | ETH_RXSTATUS_BUFFNOTAVAIL;

	/* Enable interrupts (the same masks eth_isr() dispatches on) */
	eth->IENS = ETH_IENS_RXCMPLT |
		    ETH_IENS_RXUSEDBITREAD |
		    ETH_IENS_TXCMPLT |
		    ETH_IENS_TXUNDERRUN |
		    ETH_IENS_RTRYLMTORLATECOL |
		    ETH_IENS_TXUSEDBITREAD |
		    ETH_IENS_AMBAERR;

	/* Additional DMA configuration */
	eth->DMACFG |= _ETH_DMACFG_AMBABRSTLEN_MASK |
		       ETH_DMACFG_FRCDISCARDONERR |
		       ETH_DMACFG_TXPBUFTCPEN;
	eth->DMACFG &= ~ETH_DMACFG_HDRDATASPLITEN;

	/* Set network configuration */
	eth->NETWORKCFG |= ETH_NETWORKCFG_FCSREMOVE |
			   ETH_NETWORKCFG_UNICASTHASHEN |
			   ETH_NETWORKCFG_MULTICASTHASHEN |
			   ETH_NETWORKCFG_RX1536BYTEFRAMES |
			   ETH_NETWORKCFG_RXCHKSUMOFFLOADEN;

	/* Setup PHY management port (MDC clock divider, then enable the
	 * management interface)
	 */
	eth->NETWORKCFG |= (4 << _ETH_NETWORKCFG_MDCCLKDIV_SHIFT) &
			   _ETH_NETWORKCFG_MDCCLKDIV_MASK;
	eth->NETWORKCTRL |= ETH_NETWORKCTRL_MANPORTEN;

	/* Initialise PHY */
	result = phy_gecko_init(&cfg->phy);
	if (result < 0) {
		LOG_ERR("ETH PHY Initialization Error");
		return;
	}

	/* Initialise TX/RX semaphores */
	k_sem_init(&dev_data->tx_sem, 1, ETH_TX_BUF_COUNT);
	k_sem_init(&dev_data->rx_sem, 0, K_SEM_MAX_LIMIT);

	/* Start interruption-poll thread */
	k_thread_create(&dev_data->rx_thread, dev_data->rx_thread_stack,
			K_KERNEL_STACK_SIZEOF(dev_data->rx_thread_stack),
			rx_thread, (void *) dev, NULL, NULL,
			K_PRIO_COOP(CONFIG_ETH_GECKO_RX_THREAD_PRIO),
			0, K_NO_WAIT);
}
629
eth_gecko_get_capabilities(const struct device * dev)630 static enum ethernet_hw_caps eth_gecko_get_capabilities(const struct device *dev)
631 {
632 ARG_UNUSED(dev);
633
634 return (ETHERNET_AUTO_NEGOTIATION_SET | ETHERNET_LINK_10BASE_T |
635 ETHERNET_LINK_100BASE_T | ETHERNET_DUPLEX_SET);
636 }
637
/* Ethernet L2 driver API hooks */
static const struct ethernet_api eth_api = {
	.iface_api.init = eth_iface_init,
	.get_capabilities = eth_gecko_get_capabilities,
	.send = eth_tx,
};
643
/* Connect the Ethernet IRQ to eth_isr() and enable it (called from
 * eth_init() via cfg->config_func).
 */
static void eth0_irq_config(void)
{
	IRQ_CONNECT(DT_INST_IRQN(0),
		    DT_INST_IRQ(0, priority), eth_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN(0));
}
651
/* MDIO and RMII pin groups for instance 0, taken from board defines */
static const struct eth_gecko_pin_list pins_eth0 = {
	.mdio = PIN_LIST_PHY,
	.rmii = PIN_LIST_RMII
};

/* Constant per-instance configuration (registers, pins, IRQ hookup,
 * PHY access parameters)
 */
static const struct eth_gecko_dev_cfg eth0_config = {
	.regs = (ETH_TypeDef *)
		DT_INST_REG_ADDR(0),
	.pin_list = &pins_eth0,
	.pin_list_size = ARRAY_SIZE(pins_eth0.mdio) +
			 ARRAY_SIZE(pins_eth0.rmii),
	.config_func = eth0_irq_config,
	.phy = { (ETH_TypeDef *)
		 DT_INST_REG_ADDR(0),
		 DT_INST_PROP(0, phy_address) },
};

/* Mutable per-instance state; MAC preset from devicetree when a valid
 * local-mac-address is given (see generate_mac() for the other cases)
 */
static struct eth_gecko_dev_data eth0_data = {
#if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))
	.mac_addr = DT_INST_PROP(0, local_mac_address),
#endif
};

ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_init,
			      NULL, &eth0_data, &eth0_config,
			      CONFIG_ETH_INIT_PRIORITY, &eth_api, ETH_GECKO_MTU);