/*
 * Driver for Synopsys DesignWare MAC
 *
 * Copyright (c) 2021 BayLibre SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#define LOG_MODULE_NAME dwmac_core
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <sys/types.h>
#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/sys/barrier.h>
#include <ethernet/eth_stats.h>

#include "eth_dwmac_priv.h"
#include "eth.h"


/*
 * This driver references network data fragments with a zero-copy approach.
 * Even though the hardware can store received packets with an arbitrary
 * offset in memory, the gap bytes in the first word will be overwritten,
 * and subsequent fragments have to be buswidth-aligned anyway.
 * This means CONFIG_NET_BUF_VARIABLE_DATA_SIZE requires special care due
 * to its refcount byte placement, so we take the easy way out for now.
 */
#ifdef CONFIG_NET_BUF_VARIABLE_DATA_SIZE
#error "CONFIG_NET_BUF_VARIABLE_DATA_SIZE=y is not supported"
#endif

/* size of pre-allocated packet fragments */
#define RX_FRAG_SIZE CONFIG_NET_BUF_DATA_SIZE

/*
 * Grace period to wait for TX descriptor/fragment availability.
 * Worst case estimate is 1514*8 bits at 10 Mbps for an existing packet
 * to be sent and freed, therefore 1ms is far more than enough.
 * Beyond that we'll drop the packet.
 */
#define TX_AVAIL_WAIT K_MSEC(1)

/* descriptor index iterators */
#define INC_WRAP(idx, size) ({ idx = (idx + 1) % size; })
#define DEC_WRAP(idx, size) ({ idx = (idx + size - 1) % size; })
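/*
 * Both expand to GCC statement expressions that evaluate to the updated
 * index, which dwmac_rx_refill_thread() relies on when advancing its
 * ring head.
 */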

/*
 * Descriptor physical location.
 * MMU is special here as we have a separate uncached mapping that is
 * different from the normal RAM virt_to_phys mapping.
 */
#ifdef CONFIG_MMU
#define TXDESC_PHYS_H(idx) hi32(p->tx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc))
#define TXDESC_PHYS_L(idx) lo32(p->tx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc))
#define RXDESC_PHYS_H(idx) hi32(p->rx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc))
#define RXDESC_PHYS_L(idx) lo32(p->rx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc))
#else
#define TXDESC_PHYS_H(idx) phys_hi32(&p->tx_descs[idx])
#define TXDESC_PHYS_L(idx) phys_lo32(&p->tx_descs[idx])
#define RXDESC_PHYS_H(idx) phys_hi32(&p->rx_descs[idx])
#define RXDESC_PHYS_L(idx) phys_lo32(&p->rx_descs[idx])
#endif

static inline uint32_t hi32(uintptr_t val)
{
	/* trickery to avoid compiler warnings on 32-bit build targets */
	if (sizeof(uintptr_t) > 4) {
		uint64_t hi = val;

		return hi >> 32;
	}
	return 0;
}

static inline uint32_t lo32(uintptr_t val)
{
	/* just a typecast return to be symmetric with hi32() */
	return val;
}

static inline uint32_t phys_hi32(void *addr)
{
	/* the default 1:1 mapping is assumed */
	return hi32((uintptr_t)addr);
}

static inline uint32_t phys_lo32(void *addr)
{
	/* the default 1:1 mapping is assumed */
	return lo32((uintptr_t)addr);
}

static enum ethernet_hw_caps dwmac_caps(const struct device *dev)
{
	struct dwmac_priv *p = dev->data;
	enum ethernet_hw_caps caps = 0;

	if (p->feature0 & MAC_HW_FEATURE0_GMIISEL) {
		caps |= ETHERNET_LINK_1000BASE_T;
	}

	if (p->feature0 & MAC_HW_FEATURE0_MIISEL) {
		caps |= ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T;
	}

	caps |= ETHERNET_PROMISC_MODE;

	return caps;
}

/* for debug logs */
static inline int net_pkt_get_nbfrags(struct net_pkt *pkt)
{
	struct net_buf *frag;
	int nbfrags = 0;

	for (frag = pkt->buffer; frag; frag = frag->frags) {
		nbfrags++;
	}
	return nbfrags;
}

static int dwmac_send(const struct device *dev, struct net_pkt *pkt)
{
	struct dwmac_priv *p = dev->data;
	struct net_buf *frag, *pinned;
	unsigned int pkt_len = net_pkt_get_len(pkt);
	unsigned int d_idx;
	struct dwmac_dma_desc *d;
	uint32_t des2_flags, des3_flags;

	LOG_DBG("pkt len/frags=%d/%d", pkt_len, net_pkt_get_nbfrags(pkt));

	/* initial flag values */
	des2_flags = 0;
	des3_flags = TDES3_FD | TDES3_OWN;

	/* map packet fragments */
	d_idx = p->tx_desc_head;
	frag = pkt->buffer;
	do {
		LOG_DBG("desc sem/head/tail=%d/%d/%d",
			k_sem_count_get(&p->free_tx_descs),
			p->tx_desc_head, p->tx_desc_tail);

		/* reserve a free descriptor for this fragment */
		if (k_sem_take(&p->free_tx_descs, TX_AVAIL_WAIT) != 0) {
			LOG_DBG("no more free tx descriptors");
			goto abort;
		}

		/* pin this fragment */
		pinned = net_buf_clone(frag, TX_AVAIL_WAIT);
		if (!pinned) {
			LOG_DBG("net_buf_clone() returned NULL");
			k_sem_give(&p->free_tx_descs);
			goto abort;
		}
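		/* flush this fragment to memory so the DMA engine reads current data */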
		sys_cache_data_flush_range(pinned->data, pinned->len);
		p->tx_frags[d_idx] = pinned;
		LOG_DBG("d[%d]: frag %p pinned %p len %d", d_idx,
			frag->data, pinned->data, pinned->len);

		/* if no more fragments after this one: */
		if (!frag->frags) {
			/* set those flags on the last descriptor */
			des2_flags |= TDES2_IOC;
			des3_flags |= TDES3_LD;
		}

		/* fill the descriptor */
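		/*
		 * (des0/des1 = buffer address low/high words, des2 = fragment
		 * length plus flags, des3 = total packet length plus flags;
		 * TDES3_OWN hands the descriptor over to the hardware)
		 */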
		d = &p->tx_descs[d_idx];
		d->des0 = phys_lo32(pinned->data);
		d->des1 = phys_hi32(pinned->data);
		d->des2 = pinned->len | des2_flags;
		d->des3 = pkt_len | des3_flags;

		/* clear the FD flag on subsequent descriptors */
		des3_flags &= ~TDES3_FD;

		INC_WRAP(d_idx, NB_TX_DESCS);
		frag = frag->frags;
	} while (frag);

	/* make sure all the above made it to memory */
	barrier_dmem_fence_full();

	/* update the descriptor index head */
	p->tx_desc_head = d_idx;

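	/*
	 * The hardware processes descriptors up to, but not including,
	 * the location written to the tail pointer register below.
	 */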
	/* lastly notify the hardware */
	REG_WRITE(DMA_CHn_TXDESC_TAIL_PTR(0), TXDESC_PHYS_L(d_idx));

	return 0;

abort:
	while (d_idx != p->tx_desc_head) {
		/* release already pinned fragments */
		DEC_WRAP(d_idx, NB_TX_DESCS);
		frag = p->tx_frags[d_idx];
		net_pkt_frag_unref(frag);
		k_sem_give(&p->free_tx_descs);
	}
	return -ENOMEM;
}

static void dwmac_tx_release(struct dwmac_priv *p)
{
	unsigned int d_idx;
	struct dwmac_dma_desc *d;
	struct net_buf *frag;
	uint32_t des3_val;

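	/*
	 * Walk the ring from tail to head, returning each descriptor the
	 * hardware has released back to the free_tx_descs pool.
	 */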
	for (d_idx = p->tx_desc_tail;
	     d_idx != p->tx_desc_head;
	     INC_WRAP(d_idx, NB_TX_DESCS), k_sem_give(&p->free_tx_descs)) {

		LOG_DBG("desc sem/tail/head=%d/%d/%d",
			k_sem_count_get(&p->free_tx_descs),
			p->tx_desc_tail, p->tx_desc_head);

		d = &p->tx_descs[d_idx];
		des3_val = d->des3;
		LOG_DBG("TDES3[%d] = 0x%08x", d_idx, des3_val);

		/* stop here if hardware still owns it */
		if (des3_val & TDES3_OWN) {
			break;
		}

		/* release corresponding fragments */
		frag = p->tx_frags[d_idx];
		LOG_DBG("unref frag %p", frag->data);
		net_pkt_frag_unref(frag);

		/* last packet descriptor: */
		if (des3_val & TDES3_LD) {
			/* log any errors */
			if (des3_val & TDES3_ES) {
				LOG_ERR("tx error (DES3 = 0x%08x)", des3_val);
				eth_stats_update_errors_tx(p->iface);
			}
		}
	}
	p->tx_desc_tail = d_idx;
}

static void dwmac_receive(struct dwmac_priv *p)
{
	struct dwmac_dma_desc *d;
	struct net_buf *frag;
	unsigned int d_idx, bytes_so_far;
	uint32_t des3_val;

	for (d_idx = p->rx_desc_tail;
	     d_idx != p->rx_desc_head;
	     INC_WRAP(d_idx, NB_RX_DESCS), k_sem_give(&p->free_rx_descs)) {

		LOG_DBG("desc sem/tail/head=%d/%d/%d",
			k_sem_count_get(&p->free_rx_descs),
			d_idx, p->rx_desc_head);

		d = &p->rx_descs[d_idx];
		des3_val = d->des3;
		LOG_DBG("RDES3[%d] = 0x%08x", d_idx, des3_val);

		/* stop here if hardware still owns it */
		if (des3_val & RDES3_OWN) {
			break;
		}

		/* we ignore those for now */
		if (des3_val & RDES3_CTXT) {
			continue;
		}

		/* a packet's first descriptor: */
		if (des3_val & RDES3_FD) {
			p->rx_bytes = 0;
			if (p->rx_pkt) {
				LOG_ERR("d[%d] first desc but pkt exists", d_idx);
				eth_stats_update_errors_rx(p->iface);
				net_pkt_unref(p->rx_pkt);
			}
			p->rx_pkt = net_pkt_rx_alloc_on_iface(p->iface, K_NO_WAIT);
			if (!p->rx_pkt) {
				LOG_ERR("net_pkt_rx_alloc_on_iface() failed");
				eth_stats_update_errors_rx(p->iface);
			}
		}

		if (!p->rx_pkt) {
			LOG_ERR("no rx_pkt: skipping desc %d", d_idx);
			continue;
		}

		/* retrieve current fragment */
		frag = p->rx_frags[d_idx];
		p->rx_frags[d_idx] = NULL;
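		/*
		 * The PL field in RDES3 holds the cumulative byte count for
		 * the packet so far, so this fragment's length is the delta
		 * from the previous descriptor.
		 */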
		bytes_so_far = FIELD_GET(RDES3_PL, des3_val);
		frag->len = bytes_so_far - p->rx_bytes;
		p->rx_bytes = bytes_so_far;
		net_pkt_frag_add(p->rx_pkt, frag);

		/* last descriptor: */
		if (des3_val & RDES3_LD) {
			/* submit packet if no errors */
			if (!(des3_val & RDES3_ES)) {
				LOG_DBG("pkt len/frags=%zd/%d",
					net_pkt_get_len(p->rx_pkt),
					net_pkt_get_nbfrags(p->rx_pkt));
				net_recv_data(p->iface, p->rx_pkt);
			} else {
				LOG_ERR("rx error (DES3 = 0x%08x)", des3_val);
				eth_stats_update_errors_rx(p->iface);
				net_pkt_unref(p->rx_pkt);
			}
			p->rx_pkt = NULL;
		}
	}
	p->rx_desc_tail = d_idx;
}

static void dwmac_rx_refill_thread(void *arg1, void *unused1, void *unused2)
{
	struct dwmac_priv *p = arg1;
	struct dwmac_dma_desc *d;
	struct net_buf *frag;
	unsigned int d_idx;

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	d_idx = p->rx_desc_head;
	for (;;) {
		LOG_DBG("desc sem/head/tail=%d/%d/%d",
			k_sem_count_get(&p->free_rx_descs),
			p->rx_desc_head, p->rx_desc_tail);

		/* wait for an empty descriptor */
		if (k_sem_take(&p->free_rx_descs, K_FOREVER) != 0) {
			LOG_ERR("can't get free RX desc to refill");
			break;
		}

		d = &p->rx_descs[d_idx];

		__ASSERT(!(d->des3 & RDES3_OWN),
			 "desc[%d]=0x%x: still hw owned! (sem/head/tail=%d/%d/%d)",
			 d_idx, d->des3, k_sem_count_get(&p->free_rx_descs),
			 p->rx_desc_head, p->rx_desc_tail);

		frag = p->rx_frags[d_idx];

		/* get a new fragment if the previous one was consumed */
		if (!frag) {
			frag = net_pkt_get_reserve_rx_data(RX_FRAG_SIZE, K_FOREVER);
			if (!frag) {
				LOG_ERR("net_pkt_get_reserve_rx_data() returned NULL");
				k_sem_give(&p->free_rx_descs);
				break;
			}
			LOG_DBG("new frag[%d] at %p", d_idx, frag->data);
			__ASSERT(frag->size == RX_FRAG_SIZE, "");
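			/*
			 * Invalidate this buffer's cache lines so later
			 * reads observe what the DMA engine wrote to memory
			 * rather than stale cache contents.
			 */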
			sys_cache_data_invd_range(frag->data, frag->size);
			p->rx_frags[d_idx] = frag;
		} else {
			LOG_DBG("reusing frag[%d] at %p", d_idx, frag->data);
		}

		/* all is good: initialize the descriptor */
		d->des0 = phys_lo32(frag->data);
		d->des1 = phys_hi32(frag->data);
		d->des2 = 0;
		d->des3 = RDES3_BUF1V | RDES3_IOC | RDES3_OWN;

		/* commit the above to memory */
		barrier_dmem_fence_full();

		/* advance to the next descriptor */
		p->rx_desc_head = INC_WRAP(d_idx, NB_RX_DESCS);

		/* lastly notify the hardware */
		REG_WRITE(DMA_CHn_RXDESC_TAIL_PTR(0), RXDESC_PHYS_L(d_idx));
	}
}

static void dwmac_dma_irq(struct dwmac_priv *p, unsigned int ch)
{
	uint32_t status;

	status = REG_READ(DMA_CHn_STATUS(ch));
	LOG_DBG("DMA_CHn_STATUS(%d) = 0x%08x", ch, status);
	REG_WRITE(DMA_CHn_STATUS(ch), status);

	__ASSERT(ch == 0, "only one DMA channel is currently supported");

	if (status & DMA_CHn_STATUS_AIS) {
		LOG_ERR("Abnormal Interrupt Status received (0x%x)", status);
	}

	if (status & DMA_CHn_STATUS_TI) {
		dwmac_tx_release(p);
	}

	if (status & DMA_CHn_STATUS_RI) {
		dwmac_receive(p);
	}
}

static void dwmac_mac_irq(struct dwmac_priv *p)
{
	uint32_t status;

	status = REG_READ(MAC_IRQ_STATUS);
	LOG_DBG("MAC_IRQ_STATUS = 0x%08x", status);
	__ASSERT(false, "unimplemented");
}

static void dwmac_mtl_irq(struct dwmac_priv *p)
{
	uint32_t status;

	status = REG_READ(MTL_IRQ_STATUS);
	LOG_DBG("MTL_IRQ_STATUS = 0x%08x", status);
	__ASSERT(false, "unimplemented");
}

void dwmac_isr(const struct device *ddev)
{
	struct dwmac_priv *p = ddev->data;
	uint32_t irq_status;
	unsigned int ch;

	irq_status = REG_READ(DMA_IRQ_STATUS);
	LOG_DBG("DMA_IRQ_STATUS = 0x%08x", irq_status);

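	/* bits 0-7 flag per-channel DMA interrupts: service each one */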
	while (irq_status & 0xff) {
		ch = find_lsb_set(irq_status & 0xff) - 1;
		irq_status &= ~BIT(ch);
		dwmac_dma_irq(p, ch);
	}

	if (irq_status & DMA_IRQ_STATUS_MTLIS) {
		dwmac_mtl_irq(p);
	}

	if (irq_status & DMA_IRQ_STATUS_MACIS) {
		dwmac_mac_irq(p);
	}
}

static void dwmac_set_mac_addr(struct dwmac_priv *p, uint8_t *addr, int n)
{
	uint32_t reg_val;

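	/*
	 * Address filter slot n: the high register takes the last two MAC
	 * address bytes plus the Address Enable bit, the low register the
	 * first four bytes.
	 */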
	reg_val = (addr[5] << 8) | addr[4];
	REG_WRITE(MAC_ADDRESS_HIGH(n), reg_val | MAC_ADDRESS_HIGH_AE);
	reg_val = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	REG_WRITE(MAC_ADDRESS_LOW(n), reg_val);
}

static int dwmac_set_config(const struct device *dev,
			    enum ethernet_config_type type,
			    const struct ethernet_config *config)
{
	struct dwmac_priv *p = dev->data;
	uint32_t reg_val;
	int ret = 0;

	(void) reg_val; /* silence the "unused variable" warning */

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(p->mac_addr, config->mac_address.addr, sizeof(p->mac_addr));
		dwmac_set_mac_addr(p, p->mac_addr, 0);
		net_if_set_link_addr(p->iface, p->mac_addr,
				     sizeof(p->mac_addr), NET_LINK_ETHERNET);
		break;

#if defined(CONFIG_NET_PROMISCUOUS_MODE)
	case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
		reg_val = REG_READ(MAC_PKT_FILTER);
		if (config->promisc_mode &&
		    !(reg_val & MAC_PKT_FILTER_PR)) {
			REG_WRITE(MAC_PKT_FILTER,
				  reg_val | MAC_PKT_FILTER_PR);
		} else if (!config->promisc_mode &&
			   (reg_val & MAC_PKT_FILTER_PR)) {
			REG_WRITE(MAC_PKT_FILTER,
				  reg_val & ~MAC_PKT_FILTER_PR);
		} else {
			ret = -EALREADY;
		}
		break;
#endif

	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}

static void dwmac_iface_init(struct net_if *iface)
{
	struct dwmac_priv *p = net_if_get_device(iface)->data;
	uint32_t reg_val;

	__ASSERT(!p->iface, "interface already initialized?");
	p->iface = iface;

	ethernet_init(iface);

	net_if_set_link_addr(iface, p->mac_addr, sizeof(p->mac_addr),
			     NET_LINK_ETHERNET);
	dwmac_set_mac_addr(p, p->mac_addr, 0);

	/*
	 * Semaphores are used to represent the number of available
	 * descriptors. The total is one less than the ring size in order
	 * to always have at least one inactive slot for the hardware tail
	 * pointer to stop at and to prevent our head indexes from looping
	 * back onto our tail indexes.
	 */
	k_sem_init(&p->free_tx_descs, NB_TX_DESCS - 1, NB_TX_DESCS - 1);
	k_sem_init(&p->free_rx_descs, NB_RX_DESCS - 1, NB_RX_DESCS - 1);

	/* set up RX buffer refill thread */
	k_thread_create(&p->rx_refill_thread, p->rx_refill_thread_stack,
			K_KERNEL_STACK_SIZEOF(p->rx_refill_thread_stack),
			dwmac_rx_refill_thread, p, NULL, NULL,
			0, K_PRIO_PREEMPT(0), K_NO_WAIT);
	k_thread_name_set(&p->rx_refill_thread, "dwmac_rx_refill");

	/* start up TX/RX */
	reg_val = REG_READ(DMA_CHn_TX_CTRL(0));
	REG_WRITE(DMA_CHn_TX_CTRL(0), reg_val | DMA_CHn_TX_CTRL_St);
	reg_val = REG_READ(DMA_CHn_RX_CTRL(0));
	REG_WRITE(DMA_CHn_RX_CTRL(0), reg_val | DMA_CHn_RX_CTRL_SR);
	reg_val = REG_READ(MAC_CONF);
	reg_val |= MAC_CONF_CST | MAC_CONF_TE | MAC_CONF_RE;
	REG_WRITE(MAC_CONF, reg_val);

	/* unmask IRQs */
	REG_WRITE(DMA_CHn_IRQ_ENABLE(0),
		  DMA_CHn_IRQ_ENABLE_TIE |
		  DMA_CHn_IRQ_ENABLE_RIE |
		  DMA_CHn_IRQ_ENABLE_NIE |
		  DMA_CHn_IRQ_ENABLE_FBEE |
		  DMA_CHn_IRQ_ENABLE_CDEE |
		  DMA_CHn_IRQ_ENABLE_AIE);

	LOG_DBG("done");
}

int dwmac_probe(const struct device *dev)
{
	struct dwmac_priv *p = dev->data;
	int ret;
	uint32_t reg_val;
	k_timepoint_t timeout;

	ret = dwmac_bus_init(p);
	if (ret != 0) {
		return ret;
	}

	reg_val = REG_READ(MAC_VERSION);
	LOG_INF("HW version %u.%u0", (reg_val >> 4) & 0xf, reg_val & 0xf);
	__ASSERT(FIELD_GET(MAC_VERSION_SNPSVER, reg_val) >= 0x40,
		 "This driver expects DWC-ETHERNET version >= 4.00");

	/* resets all of the MAC internal registers and logic */
	REG_WRITE(DMA_MODE, DMA_MODE_SWR);
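	/* the SWR bit self-clears once the reset sequence has completed */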
	timeout = sys_timepoint_calc(K_MSEC(100));
	while (REG_READ(DMA_MODE) & DMA_MODE_SWR) {
		if (sys_timepoint_expired(timeout)) {
			LOG_ERR("unable to reset hardware");
			return -EIO;
		}
	}

	/* get configured hardware features */
	p->feature0 = REG_READ(MAC_HW_FEATURE0);
	p->feature1 = REG_READ(MAC_HW_FEATURE1);
	p->feature2 = REG_READ(MAC_HW_FEATURE2);
	p->feature3 = REG_READ(MAC_HW_FEATURE3);
	LOG_DBG("hw_feature: 0x%08x 0x%08x 0x%08x 0x%08x",
		p->feature0, p->feature1, p->feature2, p->feature3);

	dwmac_platform_init(p);

	memset(p->tx_descs, 0, NB_TX_DESCS * sizeof(struct dwmac_dma_desc));
	memset(p->rx_descs, 0, NB_RX_DESCS * sizeof(struct dwmac_dma_desc));

	/* set up DMA */
	REG_WRITE(DMA_CHn_TX_CTRL(0), 0);
	REG_WRITE(DMA_CHn_RX_CTRL(0),
		  FIELD_PREP(DMA_CHn_RX_CTRL_PBL, 32) |
		  FIELD_PREP(DMA_CHn_RX_CTRL_RBSZ, RX_FRAG_SIZE));
	REG_WRITE(DMA_CHn_TXDESC_LIST_HADDR(0), TXDESC_PHYS_H(0));
	REG_WRITE(DMA_CHn_TXDESC_LIST_ADDR(0), TXDESC_PHYS_L(0));
	REG_WRITE(DMA_CHn_RXDESC_LIST_HADDR(0), RXDESC_PHYS_H(0));
	REG_WRITE(DMA_CHn_RXDESC_LIST_ADDR(0), RXDESC_PHYS_L(0));
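	/*
	 * Ring lengths are programmed as the descriptor count minus one;
	 * the DMA wraps back to the list base address past that point.
	 */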
	REG_WRITE(DMA_CHn_TXDESC_RING_LENGTH(0), NB_TX_DESCS - 1);
	REG_WRITE(DMA_CHn_RXDESC_RING_LENGTH(0), NB_RX_DESCS - 1);

	return 0;
}

const struct ethernet_api dwmac_api = {
	.iface_api.init		= dwmac_iface_init,
	.get_capabilities	= dwmac_caps,
	.set_config		= dwmac_set_config,
	.send			= dwmac_send,
};