/* XMC4XXX Ethernet controller
 *
 * Copyright (c) 2023 SLB
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT infineon_xmc4xxx_ethernet

#include "eth.h"

#include <stdint.h>

#include <soc.h>
#include <zephyr/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/ptp_clock.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/gptp.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/phy.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#include <ethernet/eth_stats.h>

#include <xmc_eth_mac.h>
#include <xmc_scu.h>

#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(eth_xmc4xxx);

#define NUM_TX_DMA_DESCRIPTORS CONFIG_ETH_XMC4XXX_NUM_TX_DMA_DESCRIPTORS
#define NUM_RX_DMA_DESCRIPTORS CONFIG_ETH_XMC4XXX_NUM_RX_DMA_DESCRIPTORS

#define ETH_NODE DT_NODELABEL(eth)
#define PHY_NODE DT_PHANDLE_BY_IDX(ETH_NODE, phy, 0)

#define INFINEON_OUI_B0 0x00
#define INFINEON_OUI_B1 0x03
#define INFINEON_OUI_B2 0x19

#define MODULO_INC_TX(val) {(val) = (++(val) < NUM_TX_DMA_DESCRIPTORS) ? (val) : 0; }
#define MODULO_INC_RX(val) {(val) = (++(val) < NUM_RX_DMA_DESCRIPTORS) ? (val) : 0; }

#define IS_OWNED_BY_DMA_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_OWN) != 0)
#define IS_OWNED_BY_DMA_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_OWN) != 0)

#define IS_START_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_FS) != 0)
#define IS_END_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_LS) != 0)

#define IS_TIMESTAMP_AVAILABLE_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_TSA) != 0)
#define IS_TIMESTAMP_AVAILABLE_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_TTSS) != 0)

#define TOTAL_FRAME_LENGTH(desc) (FIELD_GET(ETH_MAC_DMA_RDES0_FL, (desc)->status) - 4)

#define ETH_STATUS_ERROR_TRANSMIT_EVENTS                                                           \
	(XMC_ETH_MAC_EVENT_BUS_ERROR | XMC_ETH_MAC_EVENT_TRANSMIT_JABBER_TIMEOUT |                 \
	 XMC_ETH_MAC_EVENT_TRANSMIT_UNDERFLOW | XMC_ETH_MAC_EVENT_TRANSMIT_PROCESS_STOPPED)

#define ETH_STATUS_ERROR_RECEIVE_EVENTS                                                            \
	(XMC_ETH_MAC_EVENT_BUS_ERROR | XMC_ETH_MAC_EVENT_RECEIVE_OVERFLOW)

#define ETH_STATUS_ALL_EVENTS                                                                      \
	(ETH_STATUS_ERROR_TRANSMIT_EVENTS | ETH_STATUS_ERROR_RECEIVE_EVENTS |                      \
	 XMC_ETH_MAC_EVENT_RECEIVE | XMC_ETH_MAC_EVENT_TRANSMIT | ETH_INTERRUPT_ENABLE_NIE_Msk |   \
	 ETH_INTERRUPT_ENABLE_AIE_Msk)

#define ETH_MAC_DISABLE_MMC_INTERRUPT_MSK              0x03ffffffu
#define ETH_MAC_DISABLE_MMC_IPC_RECEIVE_INTERRUPT_MSK  0x3fff3fffu

#define ETH_STATUS_CLEARABLE_BITS 0x1e7ffu

#define ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK BIT(14)

#define ETH_RESET_TIMEOUT_USEC 200000u
#define ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC 100000u

#define ETH_LINK_SPEED_10M 0
#define ETH_LINK_SPEED_100M 1

#define ETH_LINK_DUPLEX_HALF 0
#define ETH_LINK_DUPLEX_FULL 1

#define ETH_PTP_CLOCK_FREQUENCY 50000000
#define ETH_PTP_RATE_ADJUST_RATIO_MIN 0.9
#define ETH_PTP_RATE_ADJUST_RATIO_MAX 1.1

struct eth_xmc4xxx_data {
	struct net_if *iface;
	uint8_t mac_addr[6];
	struct k_sem tx_desc_sem;
	bool link_up;
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	struct net_stats_eth stats;
#endif
	bool tx_frames_flushed;
	uint16_t dma_desc_tx_head;
	uint16_t dma_desc_rx_tail;
	sys_slist_t tx_frame_list;
	struct net_buf *rx_frag_list[NUM_RX_DMA_DESCRIPTORS];
#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	const struct device *ptp_clock;
#endif
};

struct eth_xmc4xxx_config {
	ETH_GLOBAL_TypeDef *regs;
	const struct device *phy_dev;
	void (*irq_config_func)(void);
	const struct pinctrl_dev_config *pcfg;
	const uint8_t phy_connection_type;
	XMC_ETH_MAC_PORT_CTRL_t port_ctrl;
};

struct eth_xmc4xxx_tx_frame {
	sys_snode_t node;
	struct net_pkt *pkt;
	uint16_t tail_index;
	uint16_t head_index;
};

K_MEM_SLAB_DEFINE_STATIC(tx_frame_slab, sizeof(struct eth_xmc4xxx_tx_frame),
			 CONFIG_ETH_XMC4XXX_TX_FRAME_POOL_SIZE, 4);

static XMC_ETH_MAC_DMA_DESC_t __aligned(4) tx_dma_desc[NUM_TX_DMA_DESCRIPTORS];
static XMC_ETH_MAC_DMA_DESC_t __aligned(4) rx_dma_desc[NUM_RX_DMA_DESCRIPTORS];

static inline struct net_if *get_iface(struct eth_xmc4xxx_data *ctx)
{
	return ctx->iface;
}

static void eth_xmc4xxx_tx_dma_descriptors_init(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	memset(tx_dma_desc, 0, sizeof(tx_dma_desc));

	dev_cfg->regs->TRANSMIT_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&tx_dma_desc[0];

	/* chain the descriptors */
	for (int i = 0; i < NUM_TX_DMA_DESCRIPTORS - 1; i++) {
		XMC_ETH_MAC_DMA_DESC_t *dma_desc = &tx_dma_desc[i];

		dma_desc->buffer2 = (volatile uint32_t)&tx_dma_desc[i + 1];
	}

	/* TER: transmit end of ring - it is the last descriptor in the ring */
	tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER;
	tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&tx_dma_desc[0];
}

static void eth_xmc4xxx_flush_rx(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct eth_xmc4xxx_data *dev_data = dev->data;

	dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_SR_Msk;

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) {
		rx_dma_desc[i].status = ETH_MAC_DMA_RDES0_OWN;
	}

	dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk;
	dev_data->dma_desc_rx_tail = 0;
}

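/* Drop all Tx frames queued inside the driver. Called from the ISR on transmit error */
/* events: pending packets are dropped and unreferenced, the Tx descriptor ring is */
/* reinitialized and the descriptor semaphore is reset and refilled. */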
static void eth_xmc4xxx_flush_tx(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct eth_xmc4xxx_data *dev_data = dev->data;
	sys_snode_t *node;

	LOG_DBG("Flushing tx frames");

	if (dev_data->tx_frames_flushed) {
		return;
	}

	dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_ST_Msk;

	node = sys_slist_get(&dev_data->tx_frame_list);
	while (node) {
		struct eth_xmc4xxx_tx_frame *tx_frame = SYS_SLIST_CONTAINER(node, tx_frame, node);

		net_pkt_unref(tx_frame->pkt);
		k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);

		node = sys_slist_get(&dev_data->tx_frame_list);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.errors.tx++;
		dev_data->stats.error_details.tx_aborted_errors++;
#endif
	}

	k_sem_reset(&dev_data->tx_desc_sem);

	eth_xmc4xxx_tx_dma_descriptors_init(dev);
	dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk;
	dev_data->dma_desc_tx_head = 0;
	dev_data->tx_frames_flushed = true;

	for (int i = 0; i < NUM_TX_DMA_DESCRIPTORS; i++) {
		k_sem_give(&dev_data->tx_desc_sem);
	}
}

static inline void eth_xmc4xxx_trigger_dma_tx(ETH_GLOBAL_TypeDef *regs)
{
	regs->STATUS = ETH_STATUS_TPS_Msk;
	regs->TRANSMIT_POLL_DEMAND = 0;
}

static inline void eth_xmc4xxx_trigger_dma_rx(ETH_GLOBAL_TypeDef *regs)
{
	regs->STATUS = ETH_STATUS_RU_Msk;
	regs->RECEIVE_POLL_DEMAND = 0U;
}

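/* Transmit a packet: each fragment is mapped to one Tx DMA descriptor. Ownership of the */
/* first descriptor is handed to the DMA only after the whole frame has been set up, so */
/* the DMA never sees a partially prepared frame. For gPTP frames a hardware timestamp */
/* is requested on the first descriptor. */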
static int eth_xmc4xxx_send(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct net_buf *frag;
	uint8_t *frag_data;
	uint16_t frag_len;
	int ret = 0;
	XMC_ETH_MAC_DMA_DESC_t *dma_desc = NULL;
	struct eth_xmc4xxx_tx_frame *tx_frame;
	int num_frags = 0;
	bool first_descriptor = false;

	frag = pkt->frags;
	while (frag) {
		num_frags++;
		frag = frag->frags;
	}

	if (num_frags > NUM_TX_DMA_DESCRIPTORS) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.error_details.tx_dma_failed++;
#endif
		LOG_DBG("Number of fragments exceeds total descriptors. Dropping packet");
		return -ENOMEM;
	}

	/* All available frames are buffered inside the driver. Apply back pressure. */
	while (tx_frame_slab.info.num_used == CONFIG_ETH_XMC4XXX_TX_FRAME_POOL_SIZE) {
		eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs);
		k_yield();
	}

	ret = k_mem_slab_alloc(&tx_frame_slab, (void **)&tx_frame, K_NO_WAIT);
	__ASSERT_NO_MSG(ret == 0);

	net_pkt_ref(pkt);

	dev_data->tx_frames_flushed = false;

	first_descriptor = true;
	tx_frame->pkt = pkt;
	tx_frame->tail_index = dev_data->dma_desc_tx_head;

	frag = pkt->frags;
	while (frag) {
		ret = k_sem_take(&dev_data->tx_desc_sem, K_FOREVER);
		/* isr may call k_sem_reset() */
		if (ret < 0 || dev_data->tx_frames_flushed) {
			k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);
			net_pkt_unref(pkt);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
			dev_data->stats.error_details.tx_aborted_errors++;
#endif
			LOG_DBG("Dropping frame. Buffered Tx frames were flushed in ISR.");
			return -EIO;
		}

		unsigned int key = irq_lock();
		/* Critical section for dma_desc_tx_head and tx_dma_desc. The isr may */
		/* reinitialize the descriptors and set dma_desc_tx_head to 0 */

		dma_desc = &tx_dma_desc[dev_data->dma_desc_tx_head];

		frag_data = frag->data;
		frag_len = frag->len;

		dma_desc->buffer1 = (volatile uint32_t)frag_data;
		dma_desc->length = frag_len;

		/* give ownership of the descriptor back to the DMA and set checksum offload */
		/* TCH: we are using a circular list */
		dma_desc->status = ETH_MAC_DMA_TDES0_CIC | ETH_MAC_DMA_TDES0_TCH;

		if (!first_descriptor) {
			/* Delay giving ownership of the first frag to the DMA. Prevents a race */
			/* condition where the remaining frags are not yet ready */
			dma_desc->status |= ETH_MAC_DMA_TDES0_OWN;
		} else {
			dma_desc->status |= ETH_MAC_DMA_TDES0_FS;

#if defined(CONFIG_NET_GPTP)
			struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);

			if (ntohs(hdr->type) == NET_ETH_PTYPE_PTP) {
				dma_desc->status |= ETH_MAC_DMA_TDES0_TTSE;
			}
#endif
		}
		first_descriptor = false;

		tx_frame->head_index = dev_data->dma_desc_tx_head;

		MODULO_INC_TX(dev_data->dma_desc_tx_head);

		irq_unlock(key);

		frag = frag->frags;
	}

	if (dev_data->tx_frames_flushed) {
		k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);
		net_pkt_unref(pkt);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.error_details.tx_aborted_errors++;
#endif
		LOG_DBG("Dropping frame. Buffered Tx frames were flushed in ISR.");
		return -EIO;
	}

	unsigned int key = irq_lock();

	/* label the last dma descriptor as the last segment and trigger an interrupt on it */
	dma_desc->status |= ETH_MAC_DMA_TDES0_IC | ETH_MAC_DMA_TDES0_LS;

	/* Finally give ownership of the first frag to the DMA. After this point the DMA engine */
	/* may transfer the whole frame from RAM to Ethernet */
	tx_dma_desc[tx_frame->tail_index].status |= ETH_MAC_DMA_TDES0_OWN;

	sys_slist_append(&dev_data->tx_frame_list, &tx_frame->node);

	eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs);

	irq_unlock(key);

	return 0;
}

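/* Assemble one received frame from the Rx descriptor ring. Walks the descriptors starting */
/* at the current tail until the end-of-frame descriptor is found, moves the attached */
/* fragments into a net_pkt, swaps in freshly allocated buffers and returns the descriptors */
/* to the DMA. Returns NULL if no complete frame is available. */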
static struct net_pkt *eth_xmc4xxx_rx_pkt(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	struct net_pkt *pkt = NULL;
	struct net_buf *new_frag;

	bool eof_found = false;
	uint16_t tail;
	XMC_ETH_MAC_DMA_DESC_t *dma_desc;
	int num_frags = 0;
	uint16_t frame_end_index;
	struct net_buf *frag, *last_frag = NULL;

	tail = dev_data->dma_desc_rx_tail;
	dma_desc = &rx_dma_desc[tail];

	if (IS_OWNED_BY_DMA_RX(dma_desc)) {
		return NULL;
	}

	if (!IS_START_OF_FRAME_RX(dma_desc)) {
		/* handle this error - missing SOF packet? */
		eth_xmc4xxx_flush_rx(dev);
		return NULL;
	}

	while (!IS_OWNED_BY_DMA_RX(dma_desc)) {
		eof_found = IS_END_OF_FRAME_RX(dma_desc);
		num_frags++;
		if (eof_found) {
			break;
		}

		MODULO_INC_RX(tail);

		if (tail == dev_data->dma_desc_rx_tail) {
			/* wrapped */
			break;
		}

		dma_desc = &rx_dma_desc[tail];
	}

	if (!eof_found) {
		return NULL;
	}

	frame_end_index = tail;

	pkt = net_pkt_rx_alloc(K_NO_WAIT);
	if (pkt == NULL) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.errors.rx++;
		dev_data->stats.error_details.rx_no_buffer_count++;
#endif
		LOG_DBG("Net packet allocation error");
		/* continue because we still need to read out the packet */
	}

	tail = dev_data->dma_desc_rx_tail;
	dma_desc = &rx_dma_desc[tail];
	for (;;) {
		if (pkt != NULL) {
			uint16_t frag_len = CONFIG_NET_BUF_DATA_SIZE;

			frag = dev_data->rx_frag_list[tail];
			if (tail == frame_end_index) {
				frag_len = TOTAL_FRAME_LENGTH(dma_desc) -
					   CONFIG_NET_BUF_DATA_SIZE * (num_frags - 1);

				if (IS_TIMESTAMP_AVAILABLE_RX(dma_desc)) {
					struct net_ptp_time timestamp = {
						.second = dma_desc->time_stamp_seconds,
						.nanosecond = dma_desc->time_stamp_nanoseconds};

					net_pkt_set_timestamp(pkt, &timestamp);
					net_pkt_set_priority(pkt, NET_PRIORITY_CA);
				}
			}

			new_frag = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT);
			if (new_frag == NULL) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
				dev_data->stats.errors.rx++;
				dev_data->stats.error_details.rx_buf_alloc_failed++;
#endif
				LOG_DBG("Frag allocation error. Increase CONFIG_NET_BUF_RX_COUNT.");
				net_pkt_unref(pkt);
				pkt = NULL;
			} else {
				net_buf_add(frag, frag_len);
				if (!last_frag) {
					net_pkt_frag_insert(pkt, frag);
				} else {
					net_buf_frag_insert(last_frag, frag);
				}

				last_frag = frag;
				frag = new_frag;
				dev_data->rx_frag_list[tail] = frag;
			}
		}

		dma_desc->buffer1 = (uint32_t)dev_data->rx_frag_list[tail]->data;
		dma_desc->length = dev_data->rx_frag_list[tail]->size |
				   ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK;
		dma_desc->status = ETH_MAC_DMA_RDES0_OWN;

		if (tail == frame_end_index) {
			break;
		}

		MODULO_INC_RX(tail);
		dma_desc = &rx_dma_desc[tail];
	}

	MODULO_INC_RX(tail);
	dev_data->dma_desc_rx_tail = tail;

	eth_xmc4xxx_trigger_dma_rx(dev_cfg->regs);

	return pkt;
}

static void eth_xmc4xxx_handle_rx(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	struct net_pkt *pkt = NULL;

	for (;;) {
		pkt = eth_xmc4xxx_rx_pkt(dev);
		if (!pkt) {
			return;
		}

		if (net_recv_data(get_iface(dev_data), pkt) < 0) {
			eth_stats_update_errors_rx(get_iface(dev_data));
			net_pkt_unref(pkt);
		}
	}
}

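/* Release Tx frames that the DMA has finished transmitting. For each completed frame the */
/* descriptor semaphore is replenished, a captured Tx timestamp is reported to the stack */
/* when gPTP is enabled, and the packet reference and frame slab entry are released. */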
static void eth_xmc4xxx_handle_tx(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	sys_snode_t *node = sys_slist_peek_head(&dev_data->tx_frame_list);

	while (node) {
		struct eth_xmc4xxx_tx_frame *tx_frame = SYS_SLIST_CONTAINER(node, tx_frame, node);
		bool owned_by_mcu = true;
		uint8_t index;
		int num_descriptors;

		if (tx_frame->head_index >= tx_frame->tail_index) {
			num_descriptors = tx_frame->head_index - tx_frame->tail_index + 1;
		} else {
			num_descriptors = tx_frame->head_index + NUM_TX_DMA_DESCRIPTORS -
					  tx_frame->tail_index + 1;
		}

		index = tx_frame->tail_index;
		for (int i = 0; i < num_descriptors; i++) {
			if (IS_OWNED_BY_DMA_TX(&tx_dma_desc[index])) {
				owned_by_mcu = false;
				break;
			}

			MODULO_INC_TX(index);
		}

		if (owned_by_mcu) {
#if defined(CONFIG_NET_GPTP)
			XMC_ETH_MAC_DMA_DESC_t *dma_desc = &tx_dma_desc[tx_frame->head_index];

			if (IS_TIMESTAMP_AVAILABLE_TX(dma_desc)) {
				struct net_pkt *pkt = tx_frame->pkt;

				if (atomic_get(&pkt->atomic_ref) > 1) {
					struct net_ptp_time timestamp = {
						.second = dma_desc->time_stamp_seconds,
						.nanosecond = dma_desc->time_stamp_nanoseconds};

					net_pkt_set_timestamp(pkt, &timestamp);
					net_if_add_tx_timestamp(pkt);
				}
			}
#endif

			for (int i = 0; i < num_descriptors; i++) {
				k_sem_give(&dev_data->tx_desc_sem);
			}

			sys_slist_get(&dev_data->tx_frame_list);
			net_pkt_unref(tx_frame->pkt);
			k_mem_slab_free(&tx_frame_slab, (void *)tx_frame);
			node = sys_slist_peek_head(&dev_data->tx_frame_list);
		} else {
			node = NULL;
		}
	}
}

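/* Ethernet interrupt handler. Dispatches receive and transmit completion events and */
/* flushes the corresponding descriptor ring on receive or transmit error events. */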
static void eth_xmc4xxx_isr(const struct device *dev)
{
	uint32_t lock;
	uint32_t status;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	lock = irq_lock();
	status = dev_cfg->regs->STATUS;

	if ((status & XMC_ETH_MAC_EVENT_RECEIVE) != 0) {
		eth_xmc4xxx_handle_rx(dev);
	}

	if ((status & XMC_ETH_MAC_EVENT_TRANSMIT) != 0) {
		eth_xmc4xxx_handle_tx(dev);
	}

	if ((status & ETH_STATUS_ERROR_TRANSMIT_EVENTS) != 0) {
		LOG_ERR("Transmit error event [0x%x]", status);
		eth_xmc4xxx_flush_tx(dev);
	}

	if ((status & ETH_STATUS_ERROR_RECEIVE_EVENTS) != 0) {
		LOG_ERR("Receive error event [0x%x]", status);
		eth_xmc4xxx_flush_rx(dev);
	}

	dev_cfg->regs->STATUS = status & ETH_STATUS_CLEARABLE_BITS;

	irq_unlock(lock);
}

static inline void eth_xmc4xxx_enable_tx(ETH_GLOBAL_TypeDef *regs)
{
	regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk;
	regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_TE_Msk;
}

static inline void eth_xmc4xxx_enable_rx(ETH_GLOBAL_TypeDef *regs)
{
	regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk;
	regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_RE_Msk;
}

static inline void eth_xmc4xxx_set_link(ETH_GLOBAL_TypeDef *regs, struct phy_link_state *state)
{
	uint32_t reg = regs->MAC_CONFIGURATION;
	uint32_t val;

	reg &= ~(ETH_MAC_CONFIGURATION_DM_Msk | ETH_MAC_CONFIGURATION_FES_Msk);

	val = PHY_LINK_IS_FULL_DUPLEX(state->speed) ? ETH_LINK_DUPLEX_FULL :
						      ETH_LINK_DUPLEX_HALF;
	reg |= FIELD_PREP(ETH_MAC_CONFIGURATION_DM_Msk, val);

	val = PHY_LINK_IS_SPEED_100M(state->speed) ? ETH_LINK_SPEED_100M :
						     ETH_LINK_SPEED_10M;
	reg |= FIELD_PREP(ETH_MAC_CONFIGURATION_FES_Msk, val);

	regs->MAC_CONFIGURATION = reg;
}

static void phy_link_state_changed(const struct device *phy_dev, struct phy_link_state *state,
				   void *user_data)
{
	struct device *dev = user_data;
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	bool is_up = state->is_up;

	if (is_up && !dev_data->link_up) {
		LOG_INF("Link up");
		dev_data->link_up = true;
		net_eth_carrier_on(dev_data->iface);
		eth_xmc4xxx_set_link(dev_cfg->regs, state);
	} else if (!is_up && dev_data->link_up) {
		LOG_INF("Link down");
		dev_data->link_up = false;
		net_eth_carrier_off(dev_data->iface);
	}
}

static const struct device *eth_xmc4xxx_get_phy(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	return dev_cfg->phy_dev;
}

static void eth_xmc4xxx_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	dev_data->iface = iface;

	net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr),
			     NET_LINK_ETHERNET);

	ethernet_init(iface);

	dev_cfg->irq_config_func();

	/* Do not start the interface until PHY link is up */
	net_if_carrier_off(iface);

	phy_link_callback_set(dev_cfg->phy_dev, &phy_link_state_changed, (void *)dev);

	dev_cfg->regs->INTERRUPT_ENABLE |= ETH_STATUS_ALL_EVENTS;

	eth_xmc4xxx_enable_tx(dev_cfg->regs);
	eth_xmc4xxx_enable_rx(dev_cfg->regs);
}

#if defined(CONFIG_NET_STATISTICS_ETHERNET)
static struct net_stats_eth *eth_xmc4xxx_stats(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;

	return &dev_data->stats;
}
#endif

static inline void eth_xmc4xxx_free_rx_bufs(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) {
		if (dev_data->rx_frag_list[i]) {
			net_buf_unref(dev_data->rx_frag_list[i]);
			dev_data->rx_frag_list[i] = NULL;
		}
	}
}

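/* Set up the Rx descriptor ring: chain the descriptors, attach a network buffer to each */
/* one and hand ownership to the DMA. Returns -ENOBUFS if the buffer pool is exhausted. */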
static int eth_xmc4xxx_rx_dma_descriptors_init(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	dev_cfg->regs->RECEIVE_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&rx_dma_desc[0];

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS - 1; i++) {
		XMC_ETH_MAC_DMA_DESC_t *dma_desc = &rx_dma_desc[i];

		dma_desc->buffer2 = (volatile uint32_t)&rx_dma_desc[i + 1];
	}

	rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER;
	rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&rx_dma_desc[0];

	for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) {
		XMC_ETH_MAC_DMA_DESC_t *dma_desc = &rx_dma_desc[i];
		struct net_buf *rx_buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE,
								     K_NO_WAIT);

		if (rx_buf == NULL) {
			eth_xmc4xxx_free_rx_bufs(dev);
			LOG_ERR("Failed to reserve data net buffers");
			return -ENOBUFS;
		}

		dev_data->rx_frag_list[i] = rx_buf;
		dma_desc->buffer1 = (uint32_t)rx_buf->data;
		dma_desc->length = rx_buf->size | ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK;
		dma_desc->status = ETH_MAC_DMA_RDES0_OWN;
	}

	return 0;
}

static inline int eth_xmc4xxx_reset(const struct device *dev)
{
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	dev_cfg->regs->BUS_MODE |= ETH_BUS_MODE_SWR_Msk;

	/* reset may fail if the clocks are not properly set up */
	if (!WAIT_FOR((dev_cfg->regs->BUS_MODE & ETH_BUS_MODE_SWR_Msk) == 0,
		      ETH_RESET_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

static inline void eth_xmc4xxx_set_mac_address(ETH_GLOBAL_TypeDef *regs, uint8_t *const addr)
{
	regs->MAC_ADDRESS0_HIGH = addr[4] | (addr[5] << 8);
	regs->MAC_ADDRESS0_LOW = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
}

static inline void eth_xmc4xxx_mask_unused_interrupts(ETH_GLOBAL_TypeDef *regs)
{
	/* Disable MAC Management Counter (MMC) interrupt events */
	regs->MMC_TRANSMIT_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_INTERRUPT_MSK;
	regs->MMC_RECEIVE_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_INTERRUPT_MSK;

	/* IPC - Receive IP checksum checker */
	regs->MMC_IPC_RECEIVE_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_IPC_RECEIVE_INTERRUPT_MSK;

	/* Disable PMT and timestamp interrupt events */
	regs->INTERRUPT_MASK = ETH_INTERRUPT_MASK_PMTIM_Msk | ETH_INTERRUPT_MASK_TSIM_Msk;
}

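/* Enable hardware timestamping when gPTP is used and, when the PTP clock driver is */
/* enabled, run the timestamping clock at ETH_PTP_CLOCK_FREQUENCY with fine correction so */
/* that its rate can later be adjusted through the addend register. */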
static inline int eth_xmc4xxx_init_timestamp_control_reg(ETH_GLOBAL_TypeDef *regs)
{
#if defined(CONFIG_NET_GPTP)
	regs->TIMESTAMP_CONTROL = ETH_TIMESTAMP_CONTROL_TSENA_Msk |
				  ETH_TIMESTAMP_CONTROL_TSENALL_Msk;
#endif

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	/* use fine control */
	regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSCFUPDT_Msk |
				  ETH_TIMESTAMP_CONTROL_TSCTRLSSR_Msk;

	/* make the ptp clock run at 50MHz - implies a 20ns increment for each increment */
	/* of the sub_second_register */
	regs->SUB_SECOND_INCREMENT = 20;

	/* f_out = f_cpu * K / 2^32, where K = TIMESTAMP_ADDEND. Target f_out = 50MHz */
	/* Therefore, K = round(f_out * 2^32 / f_cpu) */

	uint32_t f_cpu = XMC_SCU_CLOCK_GetSystemClockFrequency();
	uint32_t K = (BIT64(32) * ETH_PTP_CLOCK_FREQUENCY + f_cpu / 2) / f_cpu;

	regs->TIMESTAMP_ADDEND = K;

	/* Addend register update */
	regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk;
	if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk;
	if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}
#endif
	return 0;
}

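/* One-time controller initialization: bring up the MAC with the port control settings */
/* from the MDIO driver, reset the peripheral, configure checksum offload, */
/* store-and-forward mode and enhanced descriptors, set up both DMA descriptor rings, */
/* program the MAC address and frame filter, and initialize the timestamping unit. */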
static int eth_xmc4xxx_init(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;
	XMC_ETH_MAC_PORT_CTRL_t port_ctrl;
	int ret;

	sys_slist_init(&dev_data->tx_frame_list);
	k_sem_init(&dev_data->tx_desc_sem, NUM_TX_DMA_DESCRIPTORS,
					   NUM_TX_DMA_DESCRIPTORS);

	if (!device_is_ready(dev_cfg->phy_dev)) {
		LOG_ERR("Phy device not ready");
		return -ENODEV;
	}

	/* get the port control initialized by the MDIO driver */
	port_ctrl.raw = ETH0_CON->CON;
	port_ctrl.raw |= dev_cfg->port_ctrl.raw;

	XMC_ETH_MAC_Disable(NULL);
	ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		return ret;
	}

	XMC_ETH_MAC_SetPortControl(NULL, port_ctrl);
	XMC_ETH_MAC_Enable(NULL);

	ret = eth_xmc4xxx_reset(dev);
	if (ret != 0) {
		LOG_ERR("Error resetting ethernet [%d]", ret);
		return ret;
	}

	/* Initialize MAC configuration */
	/* enable checksum offload */
	dev_cfg->regs->MAC_CONFIGURATION = ETH_MAC_CONFIGURATION_IPC_Msk;

	/* disable jumbo frames */
	dev_cfg->regs->MAC_CONFIGURATION &= ~ETH_MAC_CONFIGURATION_JE_Msk;

	/* Initialize filter registers - disable zero quanta pause */
	dev_cfg->regs->FLOW_CONTROL = ETH_FLOW_CONTROL_DZPQ_Msk;

	/* rsf - receive store and forward */
	/* tsf - transmit store and forward */
	dev_cfg->regs->OPERATION_MODE = ETH_OPERATION_MODE_RSF_Msk | ETH_OPERATION_MODE_TSF_Msk |
					ETH_OPERATION_MODE_OSF_Msk;

	/* Increase enhanced descriptor to 8 WORDS, required when the Advanced */
	/* Time-Stamp feature or Full IPC Offload Engine is enabled */
	dev_cfg->regs->BUS_MODE = ETH_BUS_MODE_ATDS_Msk | ETH_BUS_MODE_AAL_Msk |
				  ETH_BUS_MODE_FB_Msk | (0x20 << ETH_BUS_MODE_PBL_Pos);

	eth_xmc4xxx_tx_dma_descriptors_init(dev);
	ret = eth_xmc4xxx_rx_dma_descriptors_init(dev);
	if (ret != 0) {
		return ret;
	}

	/* Clear interrupts */
	dev_cfg->regs->STATUS = ETH_STATUS_CLEARABLE_BITS;

	eth_xmc4xxx_mask_unused_interrupts(dev_cfg->regs);

#if !DT_INST_NODE_HAS_PROP(0, local_mac_address)
	gen_random_mac(dev_data->mac_addr, INFINEON_OUI_B0, INFINEON_OUI_B1, INFINEON_OUI_B2);
#endif
	eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr);

	uint32_t reg = dev_cfg->regs->MAC_FRAME_FILTER;
	/* enable reception of broadcast frames */
	reg &= ~ETH_MAC_FRAME_FILTER_DBF_Msk;
	/* pass all multicast frames */
	reg |= ETH_MAC_FRAME_FILTER_PM_Msk;
	dev_cfg->regs->MAC_FRAME_FILTER = reg;

	return eth_xmc4xxx_init_timestamp_control_reg(dev_cfg->regs);
}

static enum ethernet_hw_caps eth_xmc4xxx_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);
	enum ethernet_hw_caps caps = ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T |
	       ETHERNET_HW_TX_CHKSUM_OFFLOAD | ETHERNET_HW_RX_CHKSUM_OFFLOAD;

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	caps |= ETHERNET_PTP;
#endif

#if defined(CONFIG_NET_VLAN)
	caps |= ETHERNET_HW_VLAN;
#endif

	return caps;
}

static int eth_xmc4xxx_set_config(const struct device *dev, enum ethernet_config_type type,
				  const struct ethernet_config *config)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(dev_data->mac_addr, config->mac_address.addr, sizeof(dev_data->mac_addr));
		LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name,
			dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2],
			dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]);

		eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr);
		net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
				     sizeof(dev_data->mac_addr), NET_LINK_ETHERNET);
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

static void eth_xmc4xxx_irq_config(void)
{
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_xmc4xxx_isr,
		    DEVICE_DT_INST_GET(0), 0);
	irq_enable(DT_INST_IRQN(0));
}

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
static const struct device *eth_xmc4xxx_get_ptp_clock(const struct device *dev)
{
	struct eth_xmc4xxx_data *dev_data = dev->data;

	return dev_data->ptp_clock;
}
#endif

#if defined(CONFIG_ETH_XMC4XXX_VLAN_HW_FILTER)
int eth_xmc4xxx_vlan_setup(const struct device *dev, struct net_if *iface, uint16_t tag,
			   bool enable)
{
	ARG_UNUSED(iface);
	const struct eth_xmc4xxx_config *dev_cfg = dev->config;

	LOG_INF("Configuring vlan %d", tag);

	if (enable) {
		dev_cfg->regs->VLAN_TAG = FIELD_PREP(ETH_VLAN_TAG_VL_Msk, tag) |
					  ETH_VLAN_TAG_ETV_Msk |
					  ETH_VLAN_TAG_ESVL_Msk;
		dev_cfg->regs->MAC_FRAME_FILTER |= ETH_MAC_FRAME_FILTER_VTFE_Msk;
	} else {
		dev_cfg->regs->VLAN_TAG = 0;
		dev_cfg->regs->MAC_FRAME_FILTER &= ~ETH_MAC_FRAME_FILTER_VTFE_Msk;
	}

	return 0;
}
#endif

static const struct ethernet_api eth_xmc4xxx_api = {
	.iface_api.init = eth_xmc4xxx_iface_init,
	.send = eth_xmc4xxx_send,
	.set_config = eth_xmc4xxx_set_config,
	.get_phy = eth_xmc4xxx_get_phy,
	.get_capabilities = eth_xmc4xxx_capabilities,
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	.get_stats = eth_xmc4xxx_stats,
#endif
#if defined(CONFIG_PTP_CLOCK_XMC4XXX)
	.get_ptp_clock = eth_xmc4xxx_get_ptp_clock,
#endif
#if defined(CONFIG_ETH_XMC4XXX_VLAN_HW_FILTER)
	.vlan_setup = eth_xmc4xxx_vlan_setup,
#endif
};

PINCTRL_DT_INST_DEFINE(0);

static struct eth_xmc4xxx_config eth_xmc4xxx_config = {
	.regs = (ETH_GLOBAL_TypeDef *)DT_REG_ADDR(DT_INST_PARENT(0)),
	.irq_config_func = eth_xmc4xxx_irq_config,
	.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, phy_handle)),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
	.port_ctrl = {
	    .rxd0 = DT_INST_ENUM_IDX(0, rxd0_port_ctrl),
	    .rxd1 = DT_INST_ENUM_IDX(0, rxd1_port_ctrl),
	    .rxd2 = DT_INST_ENUM_IDX_OR(0, rxd2_port_ctrl, 0),
	    .rxd3 = DT_INST_ENUM_IDX_OR(0, rxd3_port_ctrl, 0),
	    .clk_rmii = DT_INST_ENUM_IDX(0, rmii_rx_clk_port_ctrl),
	    .crs_dv = DT_INST_ENUM_IDX(0, crs_rx_dv_port_ctrl),
	    .crs = DT_INST_ENUM_IDX_OR(0, crs_port_ctrl, 0),
	    .rxer = DT_INST_ENUM_IDX(0, rxer_port_ctrl),
	    .col = DT_INST_ENUM_IDX_OR(0, col_port_ctrl, 0),
	    .clk_tx = DT_INST_ENUM_IDX_OR(0, tx_clk_port_ctrl, 0),
	    .mode = DT_INST_ENUM_IDX_OR(0, phy_connection_type, 0),
	}
};

static struct eth_xmc4xxx_data eth_xmc4xxx_data = {
	.mac_addr = DT_INST_PROP_OR(0, local_mac_address, {0}),
};

ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_xmc4xxx_init, NULL, &eth_xmc4xxx_data, &eth_xmc4xxx_config,
			      CONFIG_ETH_INIT_PRIORITY, &eth_xmc4xxx_api, NET_ETH_MTU);

#if defined(CONFIG_PTP_CLOCK_XMC4XXX)

struct ptp_context {
	const struct device *eth_dev;
};

static struct ptp_context ptp_xmc4xxx_context_0;

static int eth_xmc4xxx_ptp_clock_set(const struct device *dev, struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;

	dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = tm->nanosecond;
	dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = tm->second;

	dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk;
	if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

static int eth_xmc4xxx_ptp_clock_get(const struct device *dev, struct net_ptp_time *tm)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;

	uint32_t nanosecond_0 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS;
	uint32_t second_0 = dev_cfg->regs->SYSTEM_TIME_SECONDS;

	uint32_t nanosecond_1 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS;
	uint32_t second_1 = dev_cfg->regs->SYSTEM_TIME_SECONDS;

	/* check that there was no rollover while reading the timestamp. If a rollover */
	/* happened, just use the later value */
	if (second_0 == second_1) {
		tm->second = second_0;
		tm->nanosecond = nanosecond_0;
	} else {
		tm->second = second_1;
		tm->nanosecond = nanosecond_1;
	}

	return 0;
}

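/* Shift the PTP clock by the signed number of nanoseconds given in increment. The offset */
/* must be smaller than one second; negative offsets are applied via the ADDSUB bit. */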
static int eth_xmc4xxx_ptp_clock_adjust(const struct device *dev, int increment)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;
	uint32_t increment_tmp;

	if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) {
		return -EINVAL;
	}

	if (increment < 0) {
		increment_tmp = -increment;
		increment_tmp |= ETH_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_Msk;
	} else {
		increment_tmp = increment;
	}

	dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = increment_tmp;
	dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = 0;

	dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSUPDT_Msk;
	if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSUPDT_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

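/* Scale the PTP clock frequency by ratio (bounded to 0.9..1.1) by rescaling the addend */
/* register that drives the fine correction logic. */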
static int eth_xmc4xxx_ptp_clock_rate_adjust(const struct device *dev, double ratio)
{
	struct ptp_context *ptp_context = dev->data;
	const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config;
	uint64_t K = dev_cfg->regs->TIMESTAMP_ADDEND;

	if (ratio < ETH_PTP_RATE_ADJUST_RATIO_MIN || ratio > ETH_PTP_RATE_ADJUST_RATIO_MAX) {
		return -EINVAL;
	}

	/* f_out = f_cpu * K / 2^32, where K = TIMESTAMP_ADDEND. Target f_out = 50MHz */
	K = K * ratio + 0.5;
	if (K > UINT32_MAX) {
		return -EINVAL;
	}
	dev_cfg->regs->TIMESTAMP_ADDEND = K;

	/* Addend register update */
	dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk;
	if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0,
		      ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) {
		return -ETIMEDOUT;
	}

	return 0;
}

static DEVICE_API(ptp_clock, ptp_api_xmc4xxx) = {
	.set = eth_xmc4xxx_ptp_clock_set,
	.get = eth_xmc4xxx_ptp_clock_get,
	.adjust = eth_xmc4xxx_ptp_clock_adjust,
	.rate_adjust = eth_xmc4xxx_ptp_clock_rate_adjust,
};

static int ptp_clock_xmc4xxx_init(const struct device *port)
{
	const struct device *const eth_dev = DEVICE_DT_INST_GET(0);
	struct eth_xmc4xxx_data *dev_data = eth_dev->data;
	struct ptp_context *ptp_context = port->data;

	dev_data->ptp_clock = port;
	ptp_context->eth_dev = eth_dev;

	return 0;
}

DEVICE_DEFINE(xmc4xxx_ptp_clock_0, PTP_CLOCK_NAME, ptp_clock_xmc4xxx_init, NULL,
	      &ptp_xmc4xxx_context_0, NULL, POST_KERNEL, CONFIG_PTP_CLOCK_INIT_PRIORITY,
	      &ptp_api_xmc4xxx);

#endif /* CONFIG_PTP_CLOCK_XMC4XXX */