/* XMC4XXX Ethernet controller
 *
 * Copyright (c) 2023 SLB
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT infineon_xmc4xxx_ethernet

#include "eth.h"

#include <stdint.h>

#include <soc.h>
#include <zephyr/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/ptp_clock.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/gptp.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/phy.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#include <ethernet/eth_stats.h>

#include <xmc_eth_mac.h>
#include <xmc_scu.h>

#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(eth_xmc4xxx);

#define NUM_TX_DMA_DESCRIPTORS CONFIG_ETH_XMC4XXX_NUM_TX_DMA_DESCRIPTORS
#define NUM_RX_DMA_DESCRIPTORS CONFIG_ETH_XMC4XXX_NUM_RX_DMA_DESCRIPTORS

#define ETH_NODE DT_NODELABEL(eth)
#define PHY_NODE DT_PHANDLE_BY_IDX(ETH_NODE, phy, 0)

#define INFINEON_OUI_B0 0x00
#define INFINEON_OUI_B1 0x03
#define INFINEON_OUI_B2 0x19

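/* Increment a DMA descriptor index and wrap it around the ring */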
#define MODULO_INC_TX(val) {(val) = (++(val) < NUM_TX_DMA_DESCRIPTORS) ? (val) : 0; }
#define MODULO_INC_RX(val) {(val) = (++(val) < NUM_RX_DMA_DESCRIPTORS) ? (val) : 0; }

#define IS_OWNED_BY_DMA_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_OWN) != 0)
#define IS_OWNED_BY_DMA_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_OWN) != 0)

#define IS_START_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_FS) != 0)
#define IS_END_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_LS) != 0)

#define IS_TIMESTAMP_AVAILABLE_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_TSA) != 0)
#define IS_TIMESTAMP_AVAILABLE_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_TTSS) != 0)

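/* The frame length in RDES0 includes the 4-byte CRC, which is stripped here */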
#define TOTAL_FRAME_LENGTH(desc) (FIELD_GET(ETH_MAC_DMA_RDES0_FL, (desc)->status) - 4)

#define ETH_STATUS_ERROR_TRANSMIT_EVENTS                                                           \
	(XMC_ETH_MAC_EVENT_BUS_ERROR | XMC_ETH_MAC_EVENT_TRANSMIT_JABBER_TIMEOUT |                 \
	 XMC_ETH_MAC_EVENT_TRANSMIT_UNDERFLOW | XMC_ETH_MAC_EVENT_TRANSMIT_PROCESS_STOPPED)

#define ETH_STATUS_ERROR_RECEIVE_EVENTS                                                            \
	(XMC_ETH_MAC_EVENT_BUS_ERROR | XMC_ETH_MAC_EVENT_RECEIVE_OVERFLOW)

#define ETH_STATUS_ALL_EVENTS                                                                      \
	(ETH_STATUS_ERROR_TRANSMIT_EVENTS | ETH_STATUS_ERROR_RECEIVE_EVENTS |                      \
	 XMC_ETH_MAC_EVENT_RECEIVE | XMC_ETH_MAC_EVENT_TRANSMIT | ETH_INTERRUPT_ENABLE_NIE_Msk |   \
	 ETH_INTERRUPT_ENABLE_AIE_Msk)

#define ETH_MAC_DISABLE_MMC_INTERRUPT_MSK              0x03ffffffu
#define ETH_MAC_DISABLE_MMC_IPC_RECEIVE_INTERRUPT_MSK  0x3fff3fffu

#define ETH_STATUS_CLEARABLE_BITS 0x1e7ffu

#define ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK BIT(14)

#define ETH_RESET_TIMEOUT_USEC 200000u
#define ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC 100000u

#define ETH_LINK_SPEED_10M 0
#define ETH_LINK_SPEED_100M 1

#define ETH_LINK_DUPLEX_HALF 0
#define ETH_LINK_DUPLEX_FULL 1

#define ETH_PTP_CLOCK_FREQUENCY 50000000
#define ETH_PTP_RATE_ADJUST_RATIO_MIN 0.9
#define ETH_PTP_RATE_ADJUST_RATIO_MAX 1.1

struct eth_xmc4xxx_data {
	struct net_if *iface;
	uint8_t mac_addr[6];
	struct k_sem tx_desc_sem;
	bool link_up;
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	struct net_stats_eth stats;
#endif
	bool tx_frames_flushed;
	uint16_t dma_desc_tx_head;
	uint16_t dma_desc_rx_tail;
	sys_slist_t tx_frame_list;
	struct net_buf *rx_frag_list[NUM_RX_DMA_DESCRIPTORS];
#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	const struct device *ptp_clock;
#endif
};

struct eth_xmc4xxx_config {
	ETH_GLOBAL_TypeDef *regs;
	const struct device *phy_dev;
	void (*irq_config_func)(void);
	const struct pinctrl_dev_config *pcfg;
	const uint8_t phy_connection_type;
	XMC_ETH_MAC_PORT_CTRL_t port_ctrl;
};

struct eth_xmc4xxx_tx_frame {
	sys_snode_t node;
	struct net_pkt *pkt;
	uint16_t tail_index;
	uint16_t head_index;
};

K_MEM_SLAB_DEFINE_STATIC(tx_frame_slab, sizeof(struct eth_xmc4xxx_tx_frame),
			 CONFIG_ETH_XMC4XXX_TX_FRAME_POOL_SIZE, 4);

static XMC_ETH_MAC_DMA_DESC_t __aligned(4) tx_dma_desc[NUM_TX_DMA_DESCRIPTORS];
static XMC_ETH_MAC_DMA_DESC_t __aligned(4) rx_dma_desc[NUM_RX_DMA_DESCRIPTORS];

static inline struct net_if *get_iface(struct eth_xmc4xxx_data *ctx, uint16_t vlan_tag)
{
#if defined(CONFIG_NET_VLAN)
	struct net_if *iface;

	iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);
	if (!iface) {
		return ctx->iface;
	}

	return iface;
#else
	ARG_UNUSED(vlan_tag);

	return ctx->iface;
#endif
}

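/* Set up the TX DMA descriptors as a chained ring and point the DMA engine at it */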
static void eth_xmc4xxx_tx_dma_descriptors_init(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	memset(tx_dma_desc, 0, sizeof(tx_dma_desc));

	dev_cfg->regs->TRANSMIT_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&tx_dma_desc[0];

	/* chain the descriptors */
	for (int i = 0; i < NUM_TX_DMA_DESCRIPTORS - 1; i++) {
		XMC_ETH_MAC_DMA_DESC_t *dma_desc = &tx_dma_desc[i];

		dma_desc->buffer2 = (volatile uint32_t)&tx_dma_desc[i + 1];
	}

	/* TER: transmit end of ring - it is the last descriptor in ring */
	tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER;
	tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&tx_dma_desc[0];
}

static void eth_xmc4xxx_flush_rx(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct eth_xmc4xxx_data *dev_data = dev->data;

	dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_SR_Msk;

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) {
		rx_dma_desc[i].status = ETH_MAC_DMA_RDES0_OWN;
	}

	dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk;
	dev_data->dma_desc_rx_tail = 0;
}

static void eth_xmc4xxx_flush_tx(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct eth_xmc4xxx_data *dev_data = dev->data;
	sys_snode_t *node;

	LOG_DBG("Flushing tx frames");

	if (dev_data->tx_frames_flushed) {
		return;
	}

	dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_ST_Msk;

	node = sys_slist_get(&dev_data->tx_frame_list);
	while (node) {
		struct eth_xmc4xxx_tx_frame *tx_frame = SYS_SLIST_CONTAINER(node, tx_frame, node);

		net_pkt_unref(tx_frame->pkt);
		k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);

		node = sys_slist_get(&dev_data->tx_frame_list);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.errors.tx++;
		dev_data->stats.error_details.tx_aborted_errors++;
#endif
	}

	k_sem_reset(&dev_data->tx_desc_sem);

	eth_xmc4xxx_tx_dma_descriptors_init(dev);
	dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk;
	dev_data->dma_desc_tx_head = 0;
	dev_data->tx_frames_flushed = true;

	for (int i = 0; i < NUM_TX_DMA_DESCRIPTORS; i++) {
		k_sem_give(&dev_data->tx_desc_sem);
	}
}

static inline void eth_xmc4xxx_trigger_dma_tx(ETH_GLOBAL_TypeDef *regs)
{
	regs->STATUS = ETH_STATUS_TPS_Msk;
	regs->TRANSMIT_POLL_DEMAND = 0;
}

static inline void eth_xmc4xxx_trigger_dma_rx(ETH_GLOBAL_TypeDef *regs)
{
	regs->STATUS = ETH_STATUS_RU_Msk;
	regs->RECEIVE_POLL_DEMAND = 0U;
}

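/*
 * Queue a packet for transmission. Each fragment of the packet is placed into its own
 * DMA descriptor. Ownership of the first descriptor is handed over to the DMA only
 * after all fragments have been set up, and the frame is tracked in tx_frame_list
 * until the transmit-complete interrupt releases it.
 */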
static int eth_xmc4xxx_send(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct net_buf *frag;
	uint8_t *frag_data;
	uint16_t frag_len;
	int ret = 0;
	XMC_ETH_MAC_DMA_DESC_t *dma_desc = NULL;
	struct eth_xmc4xxx_tx_frame *tx_frame;
	int num_frags = 0;
	bool first_descriptor = false;

	frag = pkt->frags;
	while (frag) {
		num_frags++;
		frag = frag->frags;
	}

	if (num_frags > NUM_TX_DMA_DESCRIPTORS) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.error_details.tx_dma_failed++;
#endif
		LOG_DBG("Number of fragments exceeds total descriptors. Dropping packet");
		return -ENOMEM;
	}

	/* All available TX frames are buffered inside the driver. Apply back pressure */
	/* until the ISR releases a frame. */
	while (tx_frame_slab.info.num_used == CONFIG_ETH_XMC4XXX_TX_FRAME_POOL_SIZE) {
		eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs);
		k_yield();
	}

	ret = k_mem_slab_alloc(&tx_frame_slab, (void **)&tx_frame, K_NO_WAIT);
	__ASSERT_NO_MSG(ret == 0);

	net_pkt_ref(pkt);

	dev_data->tx_frames_flushed = false;

	first_descriptor = true;
	tx_frame->pkt = pkt;
	tx_frame->tail_index = dev_data->dma_desc_tx_head;

	frag = pkt->frags;
	while (frag) {
		ret = k_sem_take(&dev_data->tx_desc_sem, K_FOREVER);
		/* isr may call k_sem_reset() */
		if (ret < 0 || dev_data->tx_frames_flushed) {
			k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);
			net_pkt_unref(pkt);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
			dev_data->stats.error_details.tx_aborted_errors++;
#endif
			LOG_DBG("Dropping frame. Buffered Tx frames were flushed in ISR.");
			return -EIO;
		}

		unsigned int key = irq_lock();
		/* Critical section for dma_desc_tx_head and tx_dma_desc. Isr may */
		/* reinitialize the descriptors and set dma_desc_tx_head to 0 */

		dma_desc = &tx_dma_desc[dev_data->dma_desc_tx_head];

		frag_data = frag->data;
		frag_len = frag->len;

		dma_desc->buffer1 = (volatile uint32_t)frag_data;
		dma_desc->length = frag_len;

		/* give ownership of descriptor back to dma and set checksum offload */
		/* TCH we are using a circular list */
		dma_desc->status = ETH_MAC_DMA_TDES0_CIC | ETH_MAC_DMA_TDES0_TCH;

		if (!first_descriptor) {
			/* Delay giving ownership of the first frag to the DMA. This prevents */
			/* a race condition where the DMA starts before other frags are ready */
			dma_desc->status |= ETH_MAC_DMA_TDES0_OWN;
		} else {
			dma_desc->status |= ETH_MAC_DMA_TDES0_FS;

#if defined(CONFIG_NET_GPTP)
			struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);

			if (ntohs(hdr->type) == NET_ETH_PTYPE_PTP) {
				dma_desc->status |= ETH_MAC_DMA_TDES0_TTSE;
			}
#endif
		}
		first_descriptor = false;

		tx_frame->head_index = dev_data->dma_desc_tx_head;

		MODULO_INC_TX(dev_data->dma_desc_tx_head);

		irq_unlock(key);

		frag = frag->frags;
	}

	if (dev_data->tx_frames_flushed) {
		k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);
		net_pkt_unref(pkt);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.error_details.tx_aborted_errors++;
#endif
		LOG_DBG("Dropping frame. Buffered Tx frames were flushed in ISR.");
		return -EIO;
	}

	unsigned int key = irq_lock();

	/* label last dma descriptor as last segment and trigger interrupt on last segment */
	dma_desc->status |= ETH_MAC_DMA_TDES0_IC | ETH_MAC_DMA_TDES0_LS;

	/* Finally give ownership of first frag to DMA. After this point the DMA engine */
	/* may transfer the whole frame from RAM to Ethernet */
	tx_dma_desc[tx_frame->tail_index].status |= ETH_MAC_DMA_TDES0_OWN;

	sys_slist_append(&dev_data->tx_frame_list, &tx_frame->node);

	eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs);

	irq_unlock(key);

	return 0;
}

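/*
 * Read one received frame out of the DMA descriptor ring. A frame may span several
 * descriptors (one network buffer fragment each). The fragments handed up to the
 * stack are replaced with freshly allocated buffers so the descriptors can be
 * returned to the DMA immediately.
 */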
static struct net_pkt *eth_xmc4xxx_rx_pkt(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct net_pkt *pkt = NULL;
	struct net_buf *new_frag;

	bool eof_found = false;
	uint16_t tail;
	XMC_ETH_MAC_DMA_DESC_t *dma_desc;
	int num_frags = 0;
	uint16_t frame_end_index;
	struct net_buf *frag, *last_frag = NULL;

	tail = dev_data->dma_desc_rx_tail;
	dma_desc = &rx_dma_desc[tail];

	if (IS_OWNED_BY_DMA_RX(dma_desc)) {
		return NULL;
	}

	if (!IS_START_OF_FRAME_RX(dma_desc)) {
		/* handle this error - missing SOF packet? */
		eth_xmc4xxx_flush_rx(dev);
		return NULL;
	}

	while (!IS_OWNED_BY_DMA_RX(dma_desc)) {
		eof_found = IS_END_OF_FRAME_RX(dma_desc);
		num_frags++;
		if (eof_found) {
			break;
		}

		MODULO_INC_RX(tail);

		if (tail == dev_data->dma_desc_rx_tail) {
			/* wrapped */
			break;
		}

		dma_desc = &rx_dma_desc[tail];
	}

	if (!eof_found) {
		return NULL;
	}

	frame_end_index = tail;

	pkt = net_pkt_rx_alloc(K_NO_WAIT);
	if (pkt == NULL) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.errors.rx++;
		dev_data->stats.error_details.rx_no_buffer_count++;
#endif
		LOG_DBG("Net packet allocation error");
		/* continue because we still need to read out the packet */
	}

	tail = dev_data->dma_desc_rx_tail;
	dma_desc = &rx_dma_desc[tail];
	for (;;) {
		if (pkt != NULL) {
			uint16_t frag_len = CONFIG_NET_BUF_DATA_SIZE;

			frag = dev_data->rx_frag_list[tail];
			if (tail == frame_end_index) {
				frag_len = TOTAL_FRAME_LENGTH(dma_desc) -
					   CONFIG_NET_BUF_DATA_SIZE * (num_frags - 1);

				if (IS_TIMESTAMP_AVAILABLE_RX(dma_desc)) {
					struct net_ptp_time timestamp = {
						.second = dma_desc->time_stamp_seconds,
						.nanosecond = dma_desc->time_stamp_nanoseconds};

					net_pkt_set_timestamp(pkt, &timestamp);
					net_pkt_set_priority(pkt, NET_PRIORITY_CA);
				}
			}

			new_frag = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
			if (new_frag == NULL) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
				dev_data->stats.errors.rx++;
				dev_data->stats.error_details.rx_buf_alloc_failed++;
#endif
				LOG_DBG("Frag allocation error. Increase CONFIG_NET_BUF_RX_COUNT.");
				net_pkt_unref(pkt);
				pkt = NULL;
			} else {
				net_buf_add(frag, frag_len);
				if (!last_frag) {
					net_pkt_frag_insert(pkt, frag);
				} else {
					net_buf_frag_insert(last_frag, frag);
				}

				last_frag = frag;
				frag = new_frag;
				dev_data->rx_frag_list[tail] = frag;
			}
		}

		dma_desc->buffer1 = (uint32_t)dev_data->rx_frag_list[tail]->data;
		dma_desc->length = dev_data->rx_frag_list[tail]->size |
				   ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK;
		dma_desc->status = ETH_MAC_DMA_RDES0_OWN;

		if (tail == frame_end_index) {
			break;
		}

		MODULO_INC_RX(tail);
		dma_desc = &rx_dma_desc[tail];
	}

	MODULO_INC_RX(tail);
	dev_data->dma_desc_rx_tail = tail;

	eth_xmc4xxx_trigger_dma_rx(dev_cfg->regs);

	return pkt;
}

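/* Pass all pending received frames, with their VLAN tag when present, up to the stack */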
static void eth_xmc4xxx_handle_rx(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	struct net_pkt *pkt = NULL;

	for (;;) {
		uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;

		pkt = eth_xmc4xxx_rx_pkt(dev);
		if (!pkt) {
			return;
		}
#if defined(CONFIG_NET_VLAN)
		struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);

		if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
			struct net_eth_vlan_hdr *hdr_vlan = (struct net_eth_vlan_hdr *)hdr;

			net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci));
			vlan_tag = net_pkt_vlan_tag(pkt);

#if CONFIG_NET_TC_RX_COUNT > 1
			enum net_priority prio;

			prio = net_vlan2priority(net_pkt_vlan_priority(pkt));
			net_pkt_set_priority(pkt, prio);
#endif
		}
#endif /* CONFIG_NET_VLAN */
		if (net_recv_data(get_iface(dev_data, vlan_tag), pkt) < 0) {
			eth_stats_update_errors_rx(get_iface(dev_data, vlan_tag));
			net_pkt_unref(pkt);
		}
	}
}

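/*
 * Release descriptors and buffered frames whose transmission has completed, i.e. all
 * of the frame's descriptors are owned by the MCU again. When gPTP is enabled, the TX
 * timestamp is also reported here.
 */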
static void eth_xmc4xxx_handle_tx(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	sys_snode_t *node = sys_slist_peek_head(&dev_data->tx_frame_list);

	while (node) {
		struct eth_xmc4xxx_tx_frame *tx_frame = SYS_SLIST_CONTAINER(node, tx_frame, node);
		bool owned_by_mcu = true;
		uint8_t index;
		int num_descriptors;

		if (tx_frame->head_index >= tx_frame->tail_index) {
			num_descriptors = tx_frame->head_index - tx_frame->tail_index + 1;
		} else {
			num_descriptors = tx_frame->head_index + NUM_TX_DMA_DESCRIPTORS -
					  tx_frame->tail_index + 1;
		}

		index = tx_frame->tail_index;
		for (int i = 0; i < num_descriptors; i++) {
			if (IS_OWNED_BY_DMA_TX(&tx_dma_desc[index])) {
				owned_by_mcu = false;
				break;
			}

			MODULO_INC_TX(index);
		}

		if (owned_by_mcu) {
#if defined(CONFIG_NET_GPTP)
			XMC_ETH_MAC_DMA_DESC_t *dma_desc = &tx_dma_desc[tx_frame->head_index];

			if (IS_TIMESTAMP_AVAILABLE_TX(dma_desc)) {
				struct net_pkt *pkt = tx_frame->pkt;

				if (atomic_get(&pkt->atomic_ref) > 1) {
					struct net_ptp_time timestamp = {
						.second = dma_desc->time_stamp_seconds,
						.nanosecond = dma_desc->time_stamp_nanoseconds};

					net_pkt_set_timestamp(pkt, &timestamp);
					net_if_add_tx_timestamp(pkt);
				}
			}
#endif

			for (int i = 0; i < num_descriptors; i++) {
				k_sem_give(&dev_data->tx_desc_sem);
			}

			sys_slist_get(&dev_data->tx_frame_list);
			net_pkt_unref(tx_frame->pkt);
			k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);
			node = sys_slist_peek_head(&dev_data->tx_frame_list);
		} else {
			node = NULL;
		}
	}
}

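/* Interrupt handler: dispatches RX/TX completion and flushes the rings on error events */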
static void eth_xmc4xxx_isr(const struct device *dev)
{
	uint32_t lock;
	uint32_t status;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	lock = irq_lock();
	status = dev_cfg->regs->STATUS;

	if ((status & XMC_ETH_MAC_EVENT_RECEIVE) != 0) {
		eth_xmc4xxx_handle_rx(dev);
	}

	if ((status & XMC_ETH_MAC_EVENT_TRANSMIT) != 0) {
		eth_xmc4xxx_handle_tx(dev);
	}

	if ((status & ETH_STATUS_ERROR_TRANSMIT_EVENTS) != 0) {
		LOG_ERR("Transmit error event [0x%x]", status);
		eth_xmc4xxx_flush_tx(dev);
	}

	if ((status & ETH_STATUS_ERROR_RECEIVE_EVENTS) != 0) {
		LOG_ERR("Receive error event [0x%x]", status);
		eth_xmc4xxx_flush_rx(dev);
	}

	dev_cfg->regs->STATUS = status & ETH_STATUS_CLEARABLE_BITS;

	irq_unlock(lock);
}

static inline void eth_xmc4xxx_enable_tx(ETH_GLOBAL_TypeDef *regs)
{
	regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk;
	regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_TE_Msk;
}

static inline void eth_xmc4xxx_enable_rx(ETH_GLOBAL_TypeDef *regs)
{
	regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk;
	regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_RE_Msk;
}

static inline void eth_xmc4xxx_set_link(ETH_GLOBAL_TypeDef *regs, struct phy_link_state *state)
{
	uint32_t reg = regs->MAC_CONFIGURATION;
	uint32_t val;

	reg &= ~(ETH_MAC_CONFIGURATION_DM_Msk | ETH_MAC_CONFIGURATION_FES_Msk);

	val = PHY_LINK_IS_FULL_DUPLEX(state->speed) ? ETH_LINK_DUPLEX_FULL :
						      ETH_LINK_DUPLEX_HALF;
	reg |= FIELD_PREP(ETH_MAC_CONFIGURATION_DM_Msk, val);

	val = PHY_LINK_IS_SPEED_100M(state->speed) ? ETH_LINK_SPEED_100M :
						     ETH_LINK_SPEED_10M;
	reg |= FIELD_PREP(ETH_MAC_CONFIGURATION_FES_Msk, val);

	regs->MAC_CONFIGURATION = reg;
}

static void phy_link_state_changed(const struct device *phy_dev, struct phy_link_state *state,
				   void *user_data)
{
	struct device *dev = user_data;
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	bool is_up = state->is_up;

	if (is_up && !dev_data->link_up) {
		LOG_INF("Link up");
		dev_data->link_up = true;
		net_eth_carrier_on(dev_data->iface);
		eth_xmc4xxx_set_link(dev_cfg->regs, state);
	} else if (!is_up && dev_data->link_up) {
		LOG_INF("Link down");
		dev_data->link_up = false;
		net_eth_carrier_off(dev_data->iface);
	}
}

static void eth_xmc4xxx_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	dev_data->iface = iface;

	net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr),
			     NET_LINK_ETHERNET);

	ethernet_init(iface);

	dev_cfg->irq_config_func();

	/* Do not start the interface until PHY link is up */
	net_if_carrier_off(iface);

	phy_link_callback_set(dev_cfg->phy_dev, &phy_link_state_changed, (void *)dev);

	dev_cfg->regs->INTERRUPT_ENABLE |= ETH_STATUS_ALL_EVENTS;

	eth_xmc4xxx_enable_tx(dev_cfg->regs);
	eth_xmc4xxx_enable_rx(dev_cfg->regs);
}

#if defined(CONFIG_NET_STATISTICS_ETHERNET)
static struct net_stats_eth *eth_xmc4xxx_stats(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;

	return &dev_data->stats;
}
#endif

static inline void eth_xmc4xxx_free_rx_bufs(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) {
		if (dev_data->rx_frag_list[i]) {
			net_buf_unref(dev_data->rx_frag_list[i]);
			dev_data->rx_frag_list[i] = NULL;
		}
	}
}

static int eth_xmc4xxx_rx_dma_descriptors_init(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	dev_cfg->regs->RECEIVE_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&rx_dma_desc[0];

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS - 1; i++) {
		XMC_ETH_MAC_DMA_DESC_t *dma_desc = &rx_dma_desc[i];

		dma_desc->buffer2 = (volatile uint32_t)&rx_dma_desc[i + 1];
	}

	rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER;
	rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&rx_dma_desc[0];

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) {
		XMC_ETH_MAC_DMA_DESC_t *dma_desc = &rx_dma_desc[i];
		struct net_buf *rx_buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE,
								     K_NO_WAIT);

		if (rx_buf == NULL) {
			eth_xmc4xxx_free_rx_bufs(dev);
			LOG_ERR("Failed to reserve data net buffers");
			return -ENOBUFS;
		}

		dev_data->rx_frag_list[i] = rx_buf;
		dma_desc->buffer1 = (uint32_t)rx_buf->data;
		dma_desc->length = rx_buf->size | ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK;
		dma_desc->status = ETH_MAC_DMA_RDES0_OWN;
	}

	return 0;
}

static inline int eth_xmc4xxx_reset(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	dev_cfg->regs->BUS_MODE |= ETH_BUS_MODE_SWR_Msk;

	/* reset may fail if the clocks are not properly setup */
	if (!WAIT_FOR((dev_cfg->regs->BUS_MODE & ETH_BUS_MODE_SWR_Msk) == 0,
		      ETH_RESET_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

static inline void eth_xmc4xxx_set_mac_address(ETH_GLOBAL_TypeDef *regs, uint8_t *const addr)
{
	regs->MAC_ADDRESS0_HIGH = addr[4] | (addr[5] << 8);
	regs->MAC_ADDRESS0_LOW = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
}

static inline void eth_xmc4xxx_mask_unused_interrupts(ETH_GLOBAL_TypeDef *regs)
{
	/* Disable Mac Management Counter (MMC) interrupt events */
	regs->MMC_TRANSMIT_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_INTERRUPT_MSK;
	regs->MMC_RECEIVE_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_INTERRUPT_MSK;

	/* IPC - Receive IP checksum checker */
	regs->MMC_IPC_RECEIVE_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_IPC_RECEIVE_INTERRUPT_MSK;

	/* Disable PMT and timestamp interrupt events */
	regs->INTERRUPT_MASK = ETH_INTERRUPT_MASK_PMTIM_Msk | ETH_INTERRUPT_MASK_TSIM_Msk;
}

static inline int eth_xmc4xxx_init_timestamp_control_reg(ETH_GLOBAL_TypeDef *regs)
{
#if defined(CONFIG_NET_GPTP)
	regs->TIMESTAMP_CONTROL = ETH_TIMESTAMP_CONTROL_TSENA_Msk |
				  ETH_TIMESTAMP_CONTROL_TSENALL_Msk;
#endif

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	/* use fine control */
	regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSCFUPDT_Msk |
				  ETH_TIMESTAMP_CONTROL_TSCTRLSSR_Msk;

	/* make ptp run at 50MHz - implies 20ns increment for each increment of the */
	/* sub_second_register */
	regs->SUB_SECOND_INCREMENT = 20;

	/* f_out = f_cpu * K / 2^32, where K = TIMESTAMP_ADDEND. Target f_out = 50MHz. */
	/* Therefore, K = f_out * 2^32 / f_cpu, rounded to the nearest integer */
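	/* For example, assuming f_cpu = 120 MHz, K = round(50 MHz * 2^32 / 120 MHz) */
	/* = 1789569707 (0x6AAAAAAB). */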

	uint32_t f_cpu = XMC_SCU_CLOCK_GetSystemClockFrequency();
	uint32_t K = (BIT64(32) * ETH_PTP_CLOCK_FREQUENCY + f_cpu / 2) / f_cpu;

	regs->TIMESTAMP_ADDEND = K;

	/* Addend register update */
	regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk;
	if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk;
	if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}
#endif
	return 0;
}

static int eth_xmc4xxx_init(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	XMC_ETH_MAC_PORT_CTRL_t port_ctrl;
	int ret;

	sys_slist_init(&dev_data->tx_frame_list);
	k_sem_init(&dev_data->tx_desc_sem, NUM_TX_DMA_DESCRIPTORS,
					   NUM_TX_DMA_DESCRIPTORS);

	if (!device_is_ready(dev_cfg->phy_dev)) {
		LOG_ERR("Phy device not ready");
		return -ENODEV;
	}

	/* get the port control initialized by MDIO driver */
	port_ctrl.raw = ETH0_CON->CON;
	port_ctrl.raw |= dev_cfg->port_ctrl.raw;

	XMC_ETH_MAC_Disable(NULL);
	ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		return ret;
	}

	XMC_ETH_MAC_SetPortControl(NULL, port_ctrl);
	XMC_ETH_MAC_Enable(NULL);

	ret = eth_xmc4xxx_reset(dev);
	if (ret != 0) {
		LOG_ERR("Error resetting ethernet [%d]", ret);
		return ret;
	}

	/* Initialize MAC configuration */
	/* enable checksum offload */
	dev_cfg->regs->MAC_CONFIGURATION = ETH_MAC_CONFIGURATION_IPC_Msk;

	/* disable jumbo frames */
	dev_cfg->regs->MAC_CONFIGURATION &= ~ETH_MAC_CONFIGURATION_JE_Msk;


	/* Initialize Filter registers - disable zero quanta pause */
	dev_cfg->regs->FLOW_CONTROL = ETH_FLOW_CONTROL_DZPQ_Msk;

	/* rsf - receive store and forward */
	/* tsf - transmit store and forward */
	dev_cfg->regs->OPERATION_MODE = ETH_OPERATION_MODE_RSF_Msk | ETH_OPERATION_MODE_TSF_Msk |
					ETH_OPERATION_MODE_OSF_Msk;

	/* Increase enhanced descriptor to 8 WORDS, required when the Advanced */
	/* Time-Stamp feature or Full IPC Offload Engine is enabled */
	dev_cfg->regs->BUS_MODE = ETH_BUS_MODE_ATDS_Msk | ETH_BUS_MODE_AAL_Msk |
				  ETH_BUS_MODE_FB_Msk | (0x20 << ETH_BUS_MODE_PBL_Pos);

	eth_xmc4xxx_tx_dma_descriptors_init(dev);
	ret = eth_xmc4xxx_rx_dma_descriptors_init(dev);
	if (ret != 0) {
		return ret;
	}

	/* Clear interrupts */
	dev_cfg->regs->STATUS = ETH_STATUS_CLEARABLE_BITS;

	eth_xmc4xxx_mask_unused_interrupts(dev_cfg->regs);

#if !DT_INST_NODE_HAS_PROP(0, local_mac_address)
	gen_random_mac(dev_data->mac_addr, INFINEON_OUI_B0, INFINEON_OUI_B1, INFINEON_OUI_B2);
#endif
	eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr);

	uint32_t reg = dev_cfg->regs->MAC_FRAME_FILTER;
	/* enable reception of broadcast frames */
	reg &= ~ETH_MAC_FRAME_FILTER_DBF_Msk;
	/* pass all multicast frames */
	reg |= ETH_MAC_FRAME_FILTER_PM_Msk;
	dev_cfg->regs->MAC_FRAME_FILTER = reg;

	return eth_xmc4xxx_init_timestamp_control_reg(dev_cfg->regs);
}

static enum ethernet_hw_caps eth_xmc4xxx_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);
	enum ethernet_hw_caps caps = ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T |
	       ETHERNET_HW_TX_CHKSUM_OFFLOAD | ETHERNET_HW_RX_CHKSUM_OFFLOAD;

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	caps |= ETHERNET_PTP;
#endif

#if defined(CONFIG_NET_VLAN)
	caps |= ETHERNET_HW_VLAN;
#endif

	return caps;
}

static int eth_xmc4xxx_set_config(const struct device *dev, enum ethernet_config_type type,
				  const struct ethernet_config *config)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(dev_data->mac_addr, config->mac_address.addr, sizeof(dev_data->mac_addr));
		LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name,
			dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2],
			dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]);

		eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr);
		net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
				     sizeof(dev_data->mac_addr), NET_LINK_ETHERNET);
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

static void eth_xmc4xxx_irq_config(void)
{
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_xmc4xxx_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN(0));
}

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
static const struct device *eth_xmc4xxx_get_ptp_clock(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;

	return dev_data->ptp_clock;
}
#endif


#if defined(CONFIG_ETH_XMC4XXX_VLAN_HW_FILTER)
int eth_xmc4xxx_vlan_setup(const struct device *dev, struct net_if *iface, uint16_t tag,
			   bool enable)
{
	ARG_UNUSED(iface);
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	LOG_INF("Configuring vlan %d", tag);

	if (enable) {
		dev_cfg->regs->VLAN_TAG = FIELD_PREP(ETH_VLAN_TAG_VL_Msk, tag) |
					  ETH_VLAN_TAG_ETV_Msk |
					  ETH_VLAN_TAG_ESVL_Msk;
		dev_cfg->regs->MAC_FRAME_FILTER |= ETH_MAC_FRAME_FILTER_VTFE_Msk;
	} else {
		dev_cfg->regs->VLAN_TAG = 0;
		dev_cfg->regs->MAC_FRAME_FILTER &= ~ETH_MAC_FRAME_FILTER_VTFE_Msk;
	}

	return 0;
}
#endif

static const struct ethernet_api eth_xmc4xxx_api = {
	.iface_api.init = eth_xmc4xxx_iface_init,
	.send = eth_xmc4xxx_send,
	.set_config = eth_xmc4xxx_set_config,
	.get_capabilities = eth_xmc4xxx_capabilities,
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	.get_stats = eth_xmc4xxx_stats,
#endif
#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	.get_ptp_clock = eth_xmc4xxx_get_ptp_clock,
#endif
#if defined(CONFIG_ETH_XMC4XXX_VLAN_HW_FILTER)
	.vlan_setup = eth_xmc4xxx_vlan_setup,
#endif
};

PINCTRL_DT_INST_DEFINE(0);

static struct eth_xmc4xxx_config eth_xmc4xxx_config = {
	.regs = (ETH_GLOBAL_TypeDef *)DT_REG_ADDR(DT_INST_PARENT(0)),
	.irq_config_func = eth_xmc4xxx_irq_config,
	.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, phy_handle)),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
	.port_ctrl = {
	    .rxd0 = DT_INST_ENUM_IDX(0, rxd0_port_ctrl),
	    .rxd1 = DT_INST_ENUM_IDX(0, rxd1_port_ctrl),
	    .rxd2 = DT_INST_ENUM_IDX_OR(0, rxd2_port_ctrl, 0),
	    .rxd3 = DT_INST_ENUM_IDX_OR(0, rxd3_port_ctrl, 0),
	    .clk_rmii = DT_INST_ENUM_IDX(0, rmii_rx_clk_port_ctrl),
	    .crs_dv = DT_INST_ENUM_IDX(0, crs_rx_dv_port_ctrl),
	    .crs = DT_INST_ENUM_IDX_OR(0, crs_port_ctrl, 0),
	    .rxer = DT_INST_ENUM_IDX(0, rxer_port_ctrl),
	    .col = DT_INST_ENUM_IDX_OR(0, col_port_ctrl, 0),
	    .clk_tx = DT_INST_ENUM_IDX_OR(0, tx_clk_port_ctrl, 0),
	    .mode = DT_INST_ENUM_IDX_OR(0, phy_connection_type, 0),
	}
};

static struct eth_xmc4xxx_data eth_xmc4xxx_data = {
	.mac_addr = DT_INST_PROP_OR(0, local_mac_address, {0}),
};

ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_xmc4xxx_init, NULL, &eth_xmc4xxx_data, &eth_xmc4xxx_config,
			      CONFIG_ETH_INIT_PRIORITY, &eth_xmc4xxx_api, NET_ETH_MTU);

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)

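/* The PTP clock device has no hardware of its own; it operates on the Ethernet MAC's
 * system time registers through a reference to the Ethernet device.
 */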
struct ptp_context {
	const struct device *eth_dev;
};

static struct ptp_context ptp_xmc4xxx_context_0;

static int eth_xmc4xxx_ptp_clock_set(const struct device *dev, struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;

	dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = tm->nanosecond;
	dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = tm->second;

	dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk;
	if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

static int eth_xmc4xxx_ptp_clock_get(const struct device *dev, struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;

	uint32_t nanosecond_0 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS;
	uint32_t second_0 = dev_cfg->regs->SYSTEM_TIME_SECONDS;

	uint32_t nanosecond_1 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS;
	uint32_t second_1 = dev_cfg->regs->SYSTEM_TIME_SECONDS;

	/* check that there is no roll over while we read the timestamp. If roll over happens */
	/* just choose the later value */
	if (second_0 == second_1) {
		tm->second = second_0;
		tm->nanosecond = nanosecond_0;
	} else {
		tm->second = second_1;
		tm->nanosecond = nanosecond_1;
	}

	return 0;
}

static int eth_xmc4xxx_ptp_clock_adjust(const struct device *dev, int increment)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;
	uint32_t increment_tmp;

	if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) {
		return -EINVAL;
	}

	if (increment < 0) {
		increment_tmp = -increment;
		increment_tmp |= ETH_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_Msk;
	} else {
		increment_tmp = increment;
	}

	dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = increment_tmp;
	dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = 0;

	dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSUPDT_Msk;
	if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSUPDT_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

static int eth_xmc4xxx_ptp_clock_rate_adjust(const struct device *dev, double ratio)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;
	uint64_t K = dev_cfg->regs->TIMESTAMP_ADDEND;

	if (ratio < ETH_PTP_RATE_ADJUST_RATIO_MIN || ratio > ETH_PTP_RATE_ADJUST_RATIO_MAX) {
		return -EINVAL;
	}

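	/* Scaling the addend K by ratio scales the PTP clock rate by the same factor, */
	/* e.g. ratio = 1.0001 makes the clock run 100 ppm faster */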
	/* f_out = f_cpu * K / 2^32, where K = TIMESTAMP_ADDEND. Target f_out = 50MHz */
	K = K * ratio + 0.5;
	if (K > UINT32_MAX) {
		return -EINVAL;
	}
	dev_cfg->regs->TIMESTAMP_ADDEND = K;

	/* Addend register update */
	dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk;
	if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

static const struct ptp_clock_driver_api ptp_api_xmc4xxx = {
	.set = eth_xmc4xxx_ptp_clock_set,
	.get = eth_xmc4xxx_ptp_clock_get,
	.adjust = eth_xmc4xxx_ptp_clock_adjust,
	.rate_adjust = eth_xmc4xxx_ptp_clock_rate_adjust,
};

static int ptp_clock_xmc4xxx_init(const struct device *port)
{
	const struct device *const eth_dev = DEVICE_DT_INST_GET(0);
	struct eth_xmc4xxx_data *dev_data = eth_dev->data;
	struct ptp_context *ptp_context = port->data;

	dev_data->ptp_clock = port;
	ptp_context->eth_dev = eth_dev;

	return 0;
}

DEVICE_DEFINE(xmc4xxx_ptp_clock_0, PTP_CLOCK_NAME, ptp_clock_xmc4xxx_init, NULL,
	      &ptp_xmc4xxx_context_0, NULL, POST_KERNEL, CONFIG_PTP_CLOCK_INIT_PRIORITY,
	      &ptp_api_xmc4xxx);

#endif /* CONFIG_PTP_CLOCK_XMC4XXX */