1 /*
2  * Copyright (c) 2017 Erwin Rol <erwin@erwinrol.com>
3  * Copyright (c) 2020 Alexander Kozhinov <AlexanderKozhinov@yandex.com>
4  * Copyright (c) 2021 Carbon Robotics
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #define DT_DRV_COMPAT st_stm32_ethernet
9 
10 #define LOG_MODULE_NAME eth_stm32_hal
11 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
12 
13 #include <zephyr/logging/log.h>
14 LOG_MODULE_REGISTER(LOG_MODULE_NAME);
15 
16 #include <zephyr/kernel.h>
17 #include <zephyr/device.h>
18 #include <zephyr/sys/__assert.h>
19 #include <zephyr/sys/util.h>
20 #include <zephyr/sys/crc.h>
21 #include <errno.h>
22 #include <stdbool.h>
23 #include <zephyr/net/net_pkt.h>
24 #include <zephyr/net/net_if.h>
25 #include <zephyr/net/ethernet.h>
26 #include <ethernet/eth_stats.h>
27 #include <soc.h>
28 #include <zephyr/sys/printk.h>
29 #include <zephyr/drivers/clock_control.h>
30 #include <zephyr/drivers/clock_control/stm32_clock_control.h>
31 #include <zephyr/drivers/pinctrl.h>
32 #include <zephyr/irq.h>
33 #include <zephyr/net/lldp.h>
34 #include <zephyr/drivers/hwinfo.h>
35 
36 #if defined(CONFIG_NET_DSA)
37 #include <zephyr/net/dsa.h>
38 #endif
39 
40 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
41 #include <zephyr/drivers/ptp_clock.h>
42 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
43 
44 #include "eth.h"
45 #include "eth_stm32_hal_priv.h"
46 
47 #if defined(CONFIG_ETH_STM32_HAL_RANDOM_MAC) || DT_INST_PROP(0, zephyr_random_mac_address)
48 #define ETH_STM32_RANDOM_MAC
49 #endif
50 
51 #if defined(CONFIG_ETH_STM32_HAL_USE_DTCM_FOR_DMA_BUFFER) && \
52 	    !DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
53 #error DTCM for DMA buffer is activated but zephyr,dtcm is not present in dts
54 #endif
55 
56 #define PHY_ADDR	CONFIG_ETH_STM32_HAL_PHY_ADDRESS
57 
58 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
59 
60 #define PHY_BSR  ((uint16_t)0x0001U)  /*!< Transceiver Basic Status Register */
61 #define PHY_LINKED_STATUS  ((uint16_t)0x0004U)  /*!< Valid link established */
62 
63 #define IS_ETH_DMATXDESC_OWN(dma_tx_desc)	(dma_tx_desc->DESC3 & \
64 							ETH_DMATXNDESCRF_OWN)
65 
66 #define ETH_RXBUFNB	ETH_RX_DESC_CNT
67 #define ETH_TXBUFNB	ETH_TX_DESC_CNT
68 
69 #define ETH_MEDIA_INTERFACE_MII		HAL_ETH_MII_MODE
70 #define ETH_MEDIA_INTERFACE_RMII	HAL_ETH_RMII_MODE
71 
72 /* Only one tx_buffer is needed to pass a single dma_buffer */
73 #define ETH_TXBUF_DEF_NB	1U
74 #else
75 
76 #define IS_ETH_DMATXDESC_OWN(dma_tx_desc)	(dma_tx_desc->Status & \
77 							ETH_DMATXDESC_OWN)
78 
79 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
80 
81 #define ETH_DMA_TX_TIMEOUT_MS	20U  /* transmit timeout in milliseconds */
82 
83 #if defined(CONFIG_ETH_STM32_HAL_USE_DTCM_FOR_DMA_BUFFER) && \
84 	    DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
85 #define __eth_stm32_desc __dtcm_noinit_section
86 #define __eth_stm32_buf  __dtcm_noinit_section
87 #elif defined(CONFIG_SOC_SERIES_STM32H7X)
88 #define __eth_stm32_desc __attribute__((section(".eth_stm32_desc")))
89 #define __eth_stm32_buf  __attribute__((section(".eth_stm32_buf")))
90 #elif defined(CONFIG_NOCACHE_MEMORY)
91 #define __eth_stm32_desc __nocache __aligned(4)
92 #define __eth_stm32_buf  __nocache __aligned(4)
93 #else
94 #define __eth_stm32_desc __aligned(4)
95 #define __eth_stm32_buf  __aligned(4)
96 #endif
97 
98 static ETH_DMADescTypeDef dma_rx_desc_tab[ETH_RXBUFNB] __eth_stm32_desc;
99 static ETH_DMADescTypeDef dma_tx_desc_tab[ETH_TXBUFNB] __eth_stm32_desc;
100 static uint8_t dma_rx_buffer[ETH_RXBUFNB][ETH_STM32_RX_BUF_SIZE] __eth_stm32_buf;
101 static uint8_t dma_tx_buffer[ETH_TXBUFNB][ETH_STM32_TX_BUF_SIZE] __eth_stm32_buf;
102 
103 #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
104 
105 static struct net_if_mcast_monitor mcast_monitor;
106 
107 static K_MUTEX_DEFINE(multicast_addr_lock);
108 
109 #if defined(CONFIG_NET_NATIVE_IPV6)
110 static struct in6_addr multicast_ipv6_joined_addrs[NET_IF_MAX_IPV6_MADDR] = {0};
111 #endif /* CONFIG_NET_NATIVE_IPV6 */
112 
113 #if defined(CONFIG_NET_NATIVE_IPV4)
114 static struct in_addr multicast_ipv4_joined_addrs[NET_IF_MAX_IPV4_MADDR] = {0};
115 #endif /* CONFIG_NET_NATIVE_IPV4 */
116 
117 #endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
118 
119 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
120 
121 BUILD_ASSERT(ETH_STM32_RX_BUF_SIZE % 4 == 0, "Rx buffer size must be a multiple of 4");
122 
123 struct eth_stm32_rx_buffer_header {
124 	struct eth_stm32_rx_buffer_header *next;
125 	uint16_t size;
126 	bool used;
127 };
128 
129 struct eth_stm32_tx_buffer_header {
130 	ETH_BufferTypeDef tx_buff;
131 	bool used;
132 };
133 
134 struct eth_stm32_tx_context {
135 	struct net_pkt *pkt;
136 	uint16_t first_tx_buffer_index;
137 };
138 
139 static struct eth_stm32_rx_buffer_header dma_rx_buffer_header[ETH_RXBUFNB];
140 static struct eth_stm32_tx_buffer_header dma_tx_buffer_header[ETH_TXBUFNB];
141 
142 void HAL_ETH_RxAllocateCallback(uint8_t **buf)
143 {
144 	for (size_t i = 0; i < ETH_RXBUFNB; ++i) {
145 		if (!dma_rx_buffer_header[i].used) {
146 			dma_rx_buffer_header[i].next = NULL;
147 			dma_rx_buffer_header[i].size = 0;
148 			dma_rx_buffer_header[i].used = true;
149 			*buf = dma_rx_buffer[i];
150 			return;
151 		}
152 	}
153 	*buf = NULL;
154 }
155 
156 /* Pointer to an array of ETH_STM32_RX_BUF_SIZE uint8_t's */
157 typedef uint8_t (*RxBufferPtr)[ETH_STM32_RX_BUF_SIZE];
158 
159 /* called by HAL_ETH_ReadData() */
160 void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd, uint8_t *buff, uint16_t Length)
161 {
162 	/* buff points to the beginning of one of the rx buffers,
163 	 * so we can compute the index of the given buffer
164 	 */
165 	size_t index = (RxBufferPtr)buff - &dma_rx_buffer[0];
166 	struct eth_stm32_rx_buffer_header *header = &dma_rx_buffer_header[index];
167 
168 	__ASSERT_NO_MSG(index < ETH_RXBUFNB);
169 
170 	header->size = Length;
171 
172 	if (!*pStart) {
173 		/* first packet, set head pointer of linked list */
174 		*pStart = header;
175 		*pEnd = header;
176 	} else {
177 		__ASSERT_NO_MSG(*pEnd != NULL);
178 		/* not the first packet, add to list and adjust tail pointer */
179 		((struct eth_stm32_rx_buffer_header *)*pEnd)->next = header;
180 		*pEnd = header;
181 	}
182 }
183 
184 /* Called by HAL_ETH_ReleaseTxPacket */
185 void HAL_ETH_TxFreeCallback(uint32_t *buff)
186 {
187 	__ASSERT_NO_MSG(buff != NULL);
188 
189 	/* buff is the user context in tx_config.pData */
190 	struct eth_stm32_tx_context *ctx = (struct eth_stm32_tx_context *)buff;
191 	struct eth_stm32_tx_buffer_header *buffer_header =
192 		&dma_tx_buffer_header[ctx->first_tx_buffer_index];
193 
194 	while (buffer_header != NULL) {
195 		buffer_header->used = false;
196 		if (buffer_header->tx_buff.next != NULL) {
197 			buffer_header = CONTAINER_OF(buffer_header->tx_buff.next,
198 				struct eth_stm32_tx_buffer_header, tx_buff);
199 		} else {
200 			buffer_header = NULL;
201 		}
202 	}
203 }
204 
205 /* allocate a tx buffer and mark it as used */
206 static inline uint16_t allocate_tx_buffer(void)
207 {
208 	for (;;) {
209 		for (uint16_t index = 0; index < ETH_TXBUFNB; index++) {
210 			if (!dma_tx_buffer_header[index].used) {
211 				dma_tx_buffer_header[index].used = true;
212 				return index;
213 			}
214 		}
215 		k_yield();
216 	}
217 }
218 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
219 
220 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
221 	defined(CONFIG_ETH_STM32_HAL_API_V2)
222 static ETH_TxPacketConfig tx_config;
223 #endif
224 
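/* Read a PHY register, abstracting the argument differences between HAL API variants */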
225 static HAL_StatusTypeDef read_eth_phy_register(ETH_HandleTypeDef *heth,
226 						uint32_t PHYAddr,
227 						uint32_t PHYReg,
228 						uint32_t *RegVal)
229 {
230 #if defined(CONFIG_SOC_SERIES_STM32H7X) ||  defined(CONFIG_SOC_SERIES_STM32H5X) || \
231 	defined(CONFIG_ETH_STM32_HAL_API_V2)
232 	return HAL_ETH_ReadPHYRegister(heth, PHYAddr, PHYReg, RegVal);
233 #else
234 	ARG_UNUSED(PHYAddr);
235 	return HAL_ETH_ReadPHYRegister(heth, PHYReg, RegVal);
236 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
237 }
238 
239 static inline void setup_mac_filter(ETH_HandleTypeDef *heth)
240 {
241 	__ASSERT_NO_MSG(heth != NULL);
242 
243 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
244 	ETH_MACFilterConfigTypeDef MACFilterConf;
245 
246 	HAL_ETH_GetMACFilterConfig(heth, &MACFilterConf);
247 #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
248 	MACFilterConf.HashMulticast = ENABLE;
249 	MACFilterConf.PassAllMulticast = DISABLE;
250 #else
251 	MACFilterConf.HashMulticast = DISABLE;
252 	MACFilterConf.PassAllMulticast = ENABLE;
253 #endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
254 	MACFilterConf.HachOrPerfectFilter = DISABLE;
255 
256 	HAL_ETH_SetMACFilterConfig(heth, &MACFilterConf);
257 
258 	k_sleep(K_MSEC(1));
259 #else
260 	uint32_t tmp = heth->Instance->MACFFR;
261 
262 	/* clear all multicast filter bits, resulting in perfect filtering */
263 	tmp &= ~(ETH_MULTICASTFRAMESFILTER_PERFECTHASHTABLE |
264 		 ETH_MULTICASTFRAMESFILTER_HASHTABLE |
265 		 ETH_MULTICASTFRAMESFILTER_PERFECT |
266 		 ETH_MULTICASTFRAMESFILTER_NONE);
267 
268 	if (IS_ENABLED(CONFIG_ETH_STM32_MULTICAST_FILTER)) {
269 		/* enable multicast hash receive filter */
270 		tmp |= ETH_MULTICASTFRAMESFILTER_HASHTABLE;
271 	} else {
272 		/* enable receiving all multicast frames */
273 		tmp |= ETH_MULTICASTFRAMESFILTER_NONE;
274 	}
275 
276 	heth->Instance->MACFFR = tmp;
277 
278 	/* Wait until the write operation is taken into account:
279 	 * at least four TX_CLK/RX_CLK clock cycles
280 	 */
281 	tmp = heth->Instance->MACFFR;
282 	k_sleep(K_MSEC(1));
283 	heth->Instance->MACFFR = tmp;
284 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
285 }
286 
287 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
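/* Check whether a frame is a PTP packet (handling an optional VLAN header) and, if so, raise its priority */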
288 static bool eth_is_ptp_pkt(struct net_if *iface, struct net_pkt *pkt)
289 {
290 #if defined(CONFIG_NET_VLAN)
291 	struct net_eth_vlan_hdr *hdr_vlan;
292 	struct ethernet_context *eth_ctx;
293 
294 	eth_ctx = net_if_l2_data(iface);
295 	if (net_eth_is_vlan_enabled(eth_ctx, iface)) {
296 		hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
297 
298 		if (ntohs(hdr_vlan->type) != NET_ETH_PTYPE_PTP) {
299 			return false;
300 		}
301 	} else
302 #endif
303 	{
304 		if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) {
305 			return false;
306 		}
307 	}
308 
309 	net_pkt_set_priority(pkt, NET_PRIORITY_CA);
310 
311 	return true;
312 }
313 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
314 void HAL_ETH_TxPtpCallback(uint32_t *buff, ETH_TimeStampTypeDef *timestamp)
315 {
316 	struct eth_stm32_tx_context *ctx = (struct eth_stm32_tx_context *)buff;
317 
318 	ctx->pkt->timestamp.second = timestamp->TimeStampHigh;
319 	ctx->pkt->timestamp.nanosecond = timestamp->TimeStampLow;
320 
321 	net_if_add_tx_timestamp(ctx->pkt);
322 }
323 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
324 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
325 
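/* Transmit a packet: copy it into the DMA TX buffer(s) and hand it to the MAC for transmission */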
326 static int eth_tx(const struct device *dev, struct net_pkt *pkt)
327 {
328 	struct eth_stm32_hal_dev_data *dev_data = dev->data;
329 	ETH_HandleTypeDef *heth;
330 	int res;
331 	size_t total_len;
332 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
333 	size_t remaining_read;
334 	struct eth_stm32_tx_context ctx = {.pkt = pkt, .first_tx_buffer_index = 0};
335 	struct eth_stm32_tx_buffer_header *buf_header = NULL;
336 #else
337 	uint8_t *dma_buffer;
338 	__IO ETH_DMADescTypeDef *dma_tx_desc;
339 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
340 	HAL_StatusTypeDef hal_ret = HAL_OK;
341 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
342 	bool timestamped_frame;
343 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
344 
345 	__ASSERT_NO_MSG(pkt != NULL);
346 	__ASSERT_NO_MSG(pkt->frags != NULL);
347 	__ASSERT_NO_MSG(dev != NULL);
348 	__ASSERT_NO_MSG(dev_data != NULL);
349 
350 	heth = &dev_data->heth;
351 
352 	total_len = net_pkt_get_len(pkt);
353 	if (total_len > (ETH_STM32_TX_BUF_SIZE * ETH_TXBUFNB)) {
354 		LOG_ERR("PKT too big");
355 		return -EIO;
356 	}
357 
358 	k_mutex_lock(&dev_data->tx_mutex, K_FOREVER);
359 
360 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
361 	ctx.first_tx_buffer_index = allocate_tx_buffer();
362 	buf_header = &dma_tx_buffer_header[ctx.first_tx_buffer_index];
363 #else /* CONFIG_ETH_STM32_HAL_API_V2 */
364 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
365 	uint32_t cur_tx_desc_idx;
366 
367 	cur_tx_desc_idx = heth->TxDescList.CurTxDesc;
368 	dma_tx_desc = (ETH_DMADescTypeDef *)heth->TxDescList.TxDesc[cur_tx_desc_idx];
369 #else
370 	dma_tx_desc = heth->TxDesc;
371 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
372 
373 	while (IS_ETH_DMATXDESC_OWN(dma_tx_desc) != (uint32_t)RESET) {
374 		k_yield();
375 	}
376 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
377 
378 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
379 	timestamped_frame = eth_is_ptp_pkt(net_pkt_iface(pkt), pkt);
380 	if (timestamped_frame) {
381 		/* Enable transmit timestamp */
382 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
383 		HAL_ETH_PTP_InsertTxTimestamp(heth);
384 #elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
385 		dma_tx_desc->DESC2 |= ETH_DMATXNDESCRF_TTSE;
386 #else
387 		dma_tx_desc->Status |= ETH_DMATXDESC_TTSE;
388 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
389 	}
390 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
391 
392 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
393 	remaining_read = total_len;
394 	/* fill and allocate buffers until the remaining data fits in one buffer */
395 	while (remaining_read > ETH_STM32_TX_BUF_SIZE) {
396 		if (net_pkt_read(pkt, buf_header->tx_buff.buffer, ETH_STM32_TX_BUF_SIZE)) {
397 			res = -ENOBUFS;
398 			goto error;
399 		}
400 		const uint16_t next_buffer_id = allocate_tx_buffer();
401 
402 		buf_header->tx_buff.len = ETH_STM32_TX_BUF_SIZE;
403 		/* append new buffer to the linked list */
404 		buf_header->tx_buff.next = &dma_tx_buffer_header[next_buffer_id].tx_buff;
405 		/* and adjust tail pointer */
406 		buf_header = &dma_tx_buffer_header[next_buffer_id];
407 		remaining_read -= ETH_STM32_TX_BUF_SIZE;
408 	}
409 	if (net_pkt_read(pkt, buf_header->tx_buff.buffer, remaining_read)) {
410 		res = -ENOBUFS;
411 		goto error;
412 	}
413 	buf_header->tx_buff.len = remaining_read;
414 	buf_header->tx_buff.next = NULL;
415 
416 #else /* CONFIG_ETH_STM32_HAL_API_V2 */
417 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
418 	dma_buffer = dma_tx_buffer[cur_tx_desc_idx];
419 #else
420 	dma_buffer = (uint8_t *)(dma_tx_desc->Buffer1Addr);
421 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
422 
423 	if (net_pkt_read(pkt, dma_buffer, total_len)) {
424 		res = -ENOBUFS;
425 		goto error;
426 	}
427 
428 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
429 	ETH_BufferTypeDef tx_buffer_def;
430 
431 	tx_buffer_def.buffer = dma_buffer;
432 	tx_buffer_def.len = total_len;
433 	tx_buffer_def.next = NULL;
434 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
435 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
436 
437 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
438 	defined(CONFIG_ETH_STM32_HAL_API_V2)
439 
440 	tx_config.Length = total_len;
441 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
442 	tx_config.pData = &ctx;
443 	tx_config.TxBuffer = &dma_tx_buffer_header[ctx.first_tx_buffer_index].tx_buff;
444 #else
445 	tx_config.TxBuffer = &tx_buffer_def;
446 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
447 
448 	/* Reset TX complete interrupt semaphore before TX request */
449 	k_sem_reset(&dev_data->tx_int_sem);
450 
451 	/* tx_buffer is allocated on the function stack, so we must wait */
452 	/* for the transfer to complete, ensuring it is not freed */
453 	/* before the TX complete interrupt fires */
454 	hal_ret = HAL_ETH_Transmit_IT(heth, &tx_config);
455 
456 	if (hal_ret != HAL_OK) {
457 		LOG_ERR("HAL_ETH_Transmit: failed!");
458 		res = -EIO;
459 		goto error;
460 	}
461 
462 	/* Wait for the end of TX buffer transmission. */
463 	/* If the semaphore take times out, it means that */
464 	/* an error occurred or the interrupt was not fired */
465 	if (k_sem_take(&dev_data->tx_int_sem,
466 			K_MSEC(ETH_DMA_TX_TIMEOUT_MS)) != 0) {
467 
468 		LOG_ERR("HAL_ETH_TransmitIT tx_int_sem take timeout");
469 		res = -EIO;
470 
471 #ifndef CONFIG_ETH_STM32_HAL_API_V2
472 		/* Content of the packet could be the reason for timeout */
473 		LOG_HEXDUMP_ERR(dma_buffer, total_len, "eth packet timeout");
474 #endif
475 
476 		/* Check for errors: */
477 		/* the Ethernet device may have been put in error state, */
478 		/* which is potentially unrecoverable */
479 		if (HAL_ETH_GetState(heth) == HAL_ETH_STATE_ERROR) {
480 			LOG_ERR("%s: ETH in error state: errorcode:%x",
481 				__func__,
482 				HAL_ETH_GetError(heth));
483 			/* TODO recover from error state by restarting eth */
484 		}
485 
486 		/* Check for DMA errors */
487 		if (HAL_ETH_GetDMAError(heth)) {
488 			LOG_ERR("%s: ETH DMA error: dmaerror:%x",
489 				__func__,
490 				HAL_ETH_GetDMAError(heth));
491 			/* DMA fatal bus errors put the peripheral in error state */
492 			/* TODO recover from this */
493 		}
494 
495 		/* Check for MAC errors */
496 		if (HAL_ETH_GetMACError(heth)) {
497 			LOG_ERR("%s: ETH MAC error: macerror:%x",
498 				__func__,
499 				HAL_ETH_GetMACError(heth));
500 			/* MAC errors put the peripheral in error state */
501 			/* TODO recover from this */
502 		}
503 
504 		goto error;
505 	}
506 
507 #else
508 	hal_ret = HAL_ETH_TransmitFrame(heth, total_len);
509 
510 	if (hal_ret != HAL_OK) {
511 		LOG_ERR("HAL_ETH_Transmit: failed!");
512 		res = -EIO;
513 		goto error;
514 	}
515 
516 	/* When Transmit Underflow flag is set, clear it and issue a
517 	 * Transmit Poll Demand to resume transmission.
518 	 */
519 	if ((heth->Instance->DMASR & ETH_DMASR_TUS) != (uint32_t)RESET) {
520 		/* Clear TUS ETHERNET DMA flag */
521 		heth->Instance->DMASR = ETH_DMASR_TUS;
522 		/* Resume DMA transmission */
523 		heth->Instance->DMATPDR = 0;
524 		res = -EIO;
525 		goto error;
526 	}
527 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
528 
529 #if defined(CONFIG_PTP_CLOCK_STM32_HAL) && !defined(CONFIG_ETH_STM32_HAL_API_V2)
530 	if (timestamped_frame) {
531 		/* Retrieve transmission timestamp from last DMA TX descriptor */
532 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
533 		ETH_TxDescListTypeDef *dma_tx_desc_list;
534 
535 		__IO ETH_DMADescTypeDef *last_dma_tx_desc;
536 
537 		dma_tx_desc_list = &heth->TxDescList;
538 		for (uint32_t i = 0; i < ETH_TX_DESC_CNT; i++) {
539 			const uint32_t last_desc_idx = (cur_tx_desc_idx + i) % ETH_TX_DESC_CNT;
540 
541 			last_dma_tx_desc =
542 				(ETH_DMADescTypeDef *)dma_tx_desc_list->TxDesc[last_desc_idx];
543 			if (last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_LD) {
544 				break;
545 			}
546 		}
547 
548 		while (IS_ETH_DMATXDESC_OWN(last_dma_tx_desc) != (uint32_t)RESET) {
549 			/* Wait for transmission */
550 			k_yield();
551 		}
552 
553 		if ((last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_LD) &&
554 				(last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_TTSS)) {
555 			pkt->timestamp.second = last_dma_tx_desc->DESC1;
556 			pkt->timestamp.nanosecond = last_dma_tx_desc->DESC0;
557 		} else {
558 			/* Invalid value */
559 			pkt->timestamp.second = UINT64_MAX;
560 			pkt->timestamp.nanosecond = UINT32_MAX;
561 		}
562 #else
563 		__IO ETH_DMADescTypeDef *last_dma_tx_desc = dma_tx_desc;
564 
565 		while (!(last_dma_tx_desc->Status & ETH_DMATXDESC_LS) &&
566 				last_dma_tx_desc->Buffer2NextDescAddr) {
567 			last_dma_tx_desc =
568 				(ETH_DMADescTypeDef *)last_dma_tx_desc->Buffer2NextDescAddr;
569 		}
570 
571 		while (IS_ETH_DMATXDESC_OWN(last_dma_tx_desc) != (uint32_t)RESET) {
572 			/* Wait for transmission */
573 			k_yield();
574 		}
575 
576 		if (last_dma_tx_desc->Status & ETH_DMATXDESC_LS &&
577 				last_dma_tx_desc->Status & ETH_DMATXDESC_TTSS) {
578 			pkt->timestamp.second = last_dma_tx_desc->TimeStampHigh;
579 			pkt->timestamp.nanosecond = last_dma_tx_desc->TimeStampLow;
580 		} else {
581 			/* Invalid value */
582 			pkt->timestamp.second = UINT64_MAX;
583 			pkt->timestamp.nanosecond = UINT32_MAX;
584 		}
585 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
586 
587 		net_if_add_tx_timestamp(pkt);
588 	}
589 #endif /* CONFIG_PTP_CLOCK_STM32_HAL && !CONFIG_ETH_STM32_HAL_API_V2 */
590 
591 	res = 0;
592 error:
593 
594 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
595 	/* free packet tx buffers */
596 	if (res != 0) {
597 		HAL_ETH_TxFreeCallback((uint32_t *)&ctx);
598 	} else if (HAL_ETH_ReleaseTxPacket(heth) != HAL_OK) {
599 		LOG_ERR("HAL_ETH_ReleaseTxPacket failed");
600 		res = -EIO;
601 	}
602 #endif
603 
604 	k_mutex_unlock(&dev_data->tx_mutex);
605 
606 	return res;
607 }
608 
609 static struct net_if *get_iface(struct eth_stm32_hal_dev_data *ctx,
610 				uint16_t vlan_tag)
611 {
612 #if defined(CONFIG_NET_VLAN)
613 	struct net_if *iface;
614 
615 	iface = net_eth_get_vlan_iface(ctx->iface, vlan_tag);
616 	if (!iface) {
617 		return ctx->iface;
618 	}
619 
620 	return iface;
621 #else
622 	ARG_UNUSED(vlan_tag);
623 
624 	return ctx->iface;
625 #endif
626 }
627 
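/* Fetch one received frame from the DMA RX buffers and copy it into a newly allocated net_pkt (NULL if none) */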
628 static struct net_pkt *eth_rx(const struct device *dev, uint16_t *vlan_tag)
629 {
630 	struct eth_stm32_hal_dev_data *dev_data;
631 	ETH_HandleTypeDef *heth;
632 	struct net_pkt *pkt;
633 	size_t total_len = 0;
634 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
635 	void *appbuf = NULL;
636 	struct eth_stm32_rx_buffer_header *rx_header;
637 #else
638 #if !defined(CONFIG_SOC_SERIES_STM32H7X) && !defined(CONFIG_SOC_SERIES_STM32H5X)
639 	__IO ETH_DMADescTypeDef *dma_rx_desc;
640 #endif /* !CONFIG_SOC_SERIES_STM32H7X && !CONFIG_SOC_SERIES_STM32H5X */
641 	uint8_t *dma_buffer;
642 	HAL_StatusTypeDef hal_ret = HAL_OK;
643 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
644 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
645 	struct net_ptp_time timestamp;
646 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
647 	ETH_TimeStampTypeDef ts_registers;
648 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
649 	/* Default to invalid value. */
650 	timestamp.second = UINT64_MAX;
651 	timestamp.nanosecond = UINT32_MAX;
652 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
653 
654 	__ASSERT_NO_MSG(dev != NULL);
655 
656 	dev_data = dev->data;
657 
658 	__ASSERT_NO_MSG(dev_data != NULL);
659 
660 	heth = &dev_data->heth;
661 
662 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
663 	if (HAL_ETH_ReadData(heth, &appbuf) != HAL_OK) {
664 		/* no frame available */
665 		return NULL;
666 	}
667 
668 	/* compute total length */
669 	for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
670 			rx_header; rx_header = rx_header->next) {
671 		total_len += rx_header->size;
672 	}
673 #elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
674 	if (HAL_ETH_IsRxDataAvailable(heth) != true) {
675 		/* no frame available */
676 		return NULL;
677 	}
678 
679 	ETH_BufferTypeDef rx_buffer_def;
680 	uint32_t frame_length = 0;
681 
682 	hal_ret = HAL_ETH_GetRxDataBuffer(heth, &rx_buffer_def);
683 	if (hal_ret != HAL_OK) {
684 		LOG_ERR("HAL_ETH_GetRxDataBuffer: failed with state: %d",
685 			hal_ret);
686 		return NULL;
687 	}
688 
689 	hal_ret = HAL_ETH_GetRxDataLength(heth, &frame_length);
690 	if (hal_ret != HAL_OK) {
691 		LOG_ERR("HAL_ETH_GetRxDataLength: failed with state: %d",
692 			hal_ret);
693 		return NULL;
694 	}
695 
696 	total_len = frame_length;
697 	dma_buffer = rx_buffer_def.buffer;
698 #else
699 	hal_ret = HAL_ETH_GetReceivedFrame_IT(heth);
700 	if (hal_ret != HAL_OK) {
701 		/* no frame available */
702 		return NULL;
703 	}
704 
705 	total_len = heth->RxFrameInfos.length;
706 	dma_buffer = (uint8_t *)heth->RxFrameInfos.buffer;
707 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
708 
709 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
710 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
711 
712 	if (HAL_ETH_PTP_GetRxTimestamp(heth, &ts_registers) == HAL_OK) {
713 		timestamp.second = ts_registers.TimeStampHigh;
714 		timestamp.nanosecond = ts_registers.TimeStampLow;
715 	}
716 
717 #elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
718 	ETH_RxDescListTypeDef *dma_rx_desc_list;
719 
720 	dma_rx_desc_list = &heth->RxDescList;
721 	if (dma_rx_desc_list->AppDescNbr) {
722 		__IO ETH_DMADescTypeDef *last_dma_rx_desc;
723 
724 		const uint32_t last_desc_idx =
725 			(dma_rx_desc_list->FirstAppDesc + dma_rx_desc_list->AppDescNbr - 1U)
726 				% ETH_RX_DESC_CNT;
727 
728 		last_dma_rx_desc =
729 			(ETH_DMADescTypeDef *)dma_rx_desc_list->RxDesc[last_desc_idx];
730 
731 		if (dma_rx_desc_list->AppContextDesc &&
732 				last_dma_rx_desc->DESC1 & ETH_DMARXNDESCWBF_TSA) {
733 			/* Retrieve timestamp from context DMA descriptor */
734 			__IO ETH_DMADescTypeDef *context_dma_rx_desc;
735 
736 			const uint32_t context_desc_idx = (last_desc_idx + 1U) % ETH_RX_DESC_CNT;
737 
738 			context_dma_rx_desc =
739 				(ETH_DMADescTypeDef *)dma_rx_desc_list->RxDesc[context_desc_idx];
740 			if (context_dma_rx_desc->DESC1 != UINT32_MAX ||
741 					context_dma_rx_desc->DESC0 != UINT32_MAX) {
742 				timestamp.second = context_dma_rx_desc->DESC1;
743 				timestamp.nanosecond = context_dma_rx_desc->DESC0;
744 			}
745 		}
746 	}
747 #else
748 	__IO ETH_DMADescTypeDef *last_dma_rx_desc;
749 
750 	last_dma_rx_desc = heth->RxFrameInfos.LSRxDesc;
751 	if (last_dma_rx_desc->TimeStampHigh != UINT32_MAX ||
752 			last_dma_rx_desc->TimeStampLow != UINT32_MAX) {
753 		timestamp.second = last_dma_rx_desc->TimeStampHigh;
754 		timestamp.nanosecond = last_dma_rx_desc->TimeStampLow;
755 	}
756 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
757 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
758 
759 	pkt = net_pkt_rx_alloc_with_buffer(get_iface(dev_data, *vlan_tag),
760 					   total_len, AF_UNSPEC, 0, K_MSEC(100));
761 	if (!pkt) {
762 		LOG_ERR("Failed to obtain RX buffer");
763 		goto release_desc;
764 	}
765 
766 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
767 	for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
768 			rx_header; rx_header = rx_header->next) {
769 		const size_t index = rx_header - &dma_rx_buffer_header[0];
770 
771 		__ASSERT_NO_MSG(index < ETH_RXBUFNB);
772 		if (net_pkt_write(pkt, dma_rx_buffer[index], rx_header->size)) {
773 			LOG_ERR("Failed to append RX buffer to context buffer");
774 			net_pkt_unref(pkt);
775 			pkt = NULL;
776 			goto release_desc;
777 		}
778 	}
779 #else
780 	if (net_pkt_write(pkt, dma_buffer, total_len)) {
781 		LOG_ERR("Failed to append RX buffer to context buffer");
782 		net_pkt_unref(pkt);
783 		pkt = NULL;
784 		goto release_desc;
785 	}
786 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
787 
788 release_desc:
789 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
790 	for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
791 			rx_header; rx_header = rx_header->next) {
792 		rx_header->used = false;
793 	}
794 
795 #elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
796 	hal_ret = HAL_ETH_BuildRxDescriptors(heth);
797 	if (hal_ret != HAL_OK) {
798 		LOG_ERR("HAL_ETH_BuildRxDescriptors: failed: %d", hal_ret);
799 	}
800 #else
801 	/* Release descriptors to DMA */
802 	/* Point to first descriptor */
803 	dma_rx_desc = heth->RxFrameInfos.FSRxDesc;
804 	/* Set Own bit in Rx descriptors: gives the buffers back to DMA */
805 	for (int i = 0; i < heth->RxFrameInfos.SegCount; i++) {
806 		dma_rx_desc->Status |= ETH_DMARXDESC_OWN;
807 		dma_rx_desc = (ETH_DMADescTypeDef *)
808 			(dma_rx_desc->Buffer2NextDescAddr);
809 	}
810 
811 	/* Clear Segment_Count */
812 	heth->RxFrameInfos.SegCount = 0;
813 
814 	/* When Rx Buffer unavailable flag is set: clear it
815 	 * and resume reception.
816 	 */
817 	if ((heth->Instance->DMASR & ETH_DMASR_RBUS) != (uint32_t)RESET) {
818 		/* Clear RBUS ETHERNET DMA flag */
819 		heth->Instance->DMASR = ETH_DMASR_RBUS;
820 		/* Resume DMA reception */
821 		heth->Instance->DMARPDR = 0;
822 	}
823 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
824 
825 	if (!pkt) {
826 		goto out;
827 	}
828 
829 #if defined(CONFIG_NET_VLAN)
830 	struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);
831 
832 	if (ntohs(hdr->type) == NET_ETH_PTYPE_VLAN) {
833 		struct net_eth_vlan_hdr *hdr_vlan =
834 			(struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
835 
836 		net_pkt_set_vlan_tci(pkt, ntohs(hdr_vlan->vlan.tci));
837 		*vlan_tag = net_pkt_vlan_tag(pkt);
838 
839 #if CONFIG_NET_TC_RX_COUNT > 1
840 		enum net_priority prio;
841 
842 		prio = net_vlan2priority(net_pkt_vlan_priority(pkt));
843 		net_pkt_set_priority(pkt, prio);
844 #endif
845 	} else {
846 		net_pkt_set_iface(pkt, dev_data->iface);
847 	}
848 #endif /* CONFIG_NET_VLAN */
849 
850 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
851 	if (eth_is_ptp_pkt(get_iface(dev_data, *vlan_tag), pkt)) {
852 		pkt->timestamp.second = timestamp.second;
853 		pkt->timestamp.nanosecond = timestamp.nanosecond;
854 	} else {
855 		/* Invalid value */
856 		pkt->timestamp.second = UINT64_MAX;
857 		pkt->timestamp.nanosecond = UINT32_MAX;
858 	}
859 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
860 
861 out:
862 	if (!pkt) {
863 		eth_stats_update_errors_rx(get_iface(dev_data, *vlan_tag));
864 	}
865 
866 	return pkt;
867 }
868 
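/* RX thread: waits on the RX interrupt semaphore to drain received frames, and polls the PHY link status on timeout */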
869 static void rx_thread(void *arg1, void *unused1, void *unused2)
870 {
871 	uint16_t vlan_tag = NET_VLAN_TAG_UNSPEC;
872 	const struct device *dev;
873 	struct eth_stm32_hal_dev_data *dev_data;
874 	struct net_if *iface;
875 	struct net_pkt *pkt;
876 	int res;
877 	uint32_t status;
878 	HAL_StatusTypeDef hal_ret = HAL_OK;
879 
880 	__ASSERT_NO_MSG(arg1 != NULL);
881 	ARG_UNUSED(unused1);
882 	ARG_UNUSED(unused2);
883 
884 	dev = (const struct device *)arg1;
885 	dev_data = dev->data;
886 
887 	__ASSERT_NO_MSG(dev_data != NULL);
888 
889 	while (1) {
890 		res = k_sem_take(&dev_data->rx_int_sem,
891 			K_MSEC(CONFIG_ETH_STM32_CARRIER_CHECK_RX_IDLE_TIMEOUT_MS));
892 		if (res == 0) {
893 			/* semaphore taken, update link status and receive packets */
894 			if (dev_data->link_up != true) {
895 				dev_data->link_up = true;
896 				net_eth_carrier_on(get_iface(dev_data,
897 							     vlan_tag));
898 			}
899 			while ((pkt = eth_rx(dev, &vlan_tag)) != NULL) {
900 				iface = net_pkt_iface(pkt);
901 #if defined(CONFIG_NET_DSA)
902 				iface = dsa_net_recv(iface, &pkt);
903 #endif
904 				res = net_recv_data(iface, pkt);
905 				if (res < 0) {
906 					eth_stats_update_errors_rx(
907 							net_pkt_iface(pkt));
908 					LOG_ERR("Failed to enqueue frame "
909 						"into RX queue: %d", res);
910 					net_pkt_unref(pkt);
911 				}
912 			}
913 		} else if (res == -EAGAIN) {
914 			/* semaphore timeout period expired, check link status */
915 			hal_ret = read_eth_phy_register(&dev_data->heth,
916 				    PHY_ADDR, PHY_BSR, (uint32_t *) &status);
917 			if (hal_ret == HAL_OK) {
918 				if ((status & PHY_LINKED_STATUS) == PHY_LINKED_STATUS) {
919 					if (dev_data->link_up != true) {
920 						dev_data->link_up = true;
921 						net_eth_carrier_on(
922 							get_iface(dev_data,
923 								  vlan_tag));
924 					}
925 				} else {
926 					if (dev_data->link_up != false) {
927 						dev_data->link_up = false;
928 						net_eth_carrier_off(
929 							get_iface(dev_data,
930 								  vlan_tag));
931 					}
932 				}
933 			}
934 		}
935 	}
936 }
937 
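/* Ethernet interrupt service routine, simply delegates to the HAL IRQ handler */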
938 static void eth_isr(const struct device *dev)
939 {
940 	struct eth_stm32_hal_dev_data *dev_data;
941 	ETH_HandleTypeDef *heth;
942 
943 	__ASSERT_NO_MSG(dev != NULL);
944 
945 	dev_data = dev->data;
946 
947 	__ASSERT_NO_MSG(dev_data != NULL);
948 
949 	heth = &dev_data->heth;
950 
951 	__ASSERT_NO_MSG(heth != NULL);
952 
953 	HAL_ETH_IRQHandler(heth);
954 }
955 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
956 	defined(CONFIG_ETH_STM32_HAL_API_V2)
957 void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth_handle)
958 {
959 	__ASSERT_NO_MSG(heth_handle != NULL);
960 
961 	struct eth_stm32_hal_dev_data *dev_data =
962 		CONTAINER_OF(heth_handle, struct eth_stm32_hal_dev_data, heth);
963 
964 	__ASSERT_NO_MSG(dev_data != NULL);
965 
966 	k_sem_give(&dev_data->tx_int_sem);
967 
968 }
969 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
970 
971 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
972 void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth)
973 {
974 	/* Do not log errors. If errors are reported due to high traffic,
975 	 * logging errors will only increase traffic issues
976 	 */
977 #if defined(CONFIG_NET_STATISTICS_ETHERNET)
978 	__ASSERT_NO_MSG(heth != NULL);
979 
980 	uint32_t dma_error;
981 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
982 	uint32_t mac_error;
983 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
984 	const uint32_t error_code = HAL_ETH_GetError(heth);
985 
986 	struct eth_stm32_hal_dev_data *dev_data =
987 		CONTAINER_OF(heth, struct eth_stm32_hal_dev_data, heth);
988 
989 	switch (error_code) {
990 	case HAL_ETH_ERROR_DMA:
991 		dma_error = HAL_ETH_GetDMAError(heth);
992 
993 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
994 		if ((dma_error & ETH_DMA_RX_WATCHDOG_TIMEOUT_FLAG)   ||
995 			(dma_error & ETH_DMA_RX_PROCESS_STOPPED_FLAG)    ||
996 			(dma_error & ETH_DMA_RX_BUFFER_UNAVAILABLE_FLAG)) {
997 			eth_stats_update_errors_rx(dev_data->iface);
998 		}
999 		if ((dma_error & ETH_DMA_EARLY_TX_IT_FLAG) ||
1000 			(dma_error & ETH_DMA_TX_PROCESS_STOPPED_FLAG)) {
1001 			eth_stats_update_errors_tx(dev_data->iface);
1002 		}
1003 #else
1004 		if ((dma_error & ETH_DMASR_RWTS) ||
1005 			(dma_error & ETH_DMASR_RPSS) ||
1006 			(dma_error & ETH_DMASR_RBUS)) {
1007 			eth_stats_update_errors_rx(dev_data->iface);
1008 		}
1009 		if ((dma_error & ETH_DMASR_ETS)  ||
1010 			(dma_error & ETH_DMASR_TPSS) ||
1011 			(dma_error & ETH_DMASR_TJTS)) {
1012 			eth_stats_update_errors_tx(dev_data->iface);
1013 		}
1014 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1015 		break;
1016 
1017 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1018 	case HAL_ETH_ERROR_MAC:
1019 		mac_error = HAL_ETH_GetMACError(heth);
1020 
1021 		if (mac_error & ETH_RECEIVE_WATCHDOG_TIMEOUT) {
1022 			eth_stats_update_errors_rx(dev_data->iface);
1023 		}
1024 
1025 		if ((mac_error & ETH_EXECESSIVE_COLLISIONS)  ||
1026 			(mac_error & ETH_LATE_COLLISIONS)        ||
1027 			(mac_error & ETH_EXECESSIVE_DEFERRAL)    ||
1028 			(mac_error & ETH_TRANSMIT_JABBR_TIMEOUT) ||
1029 			(mac_error & ETH_LOSS_OF_CARRIER)        ||
1030 			(mac_error & ETH_NO_CARRIER)) {
1031 			eth_stats_update_errors_tx(dev_data->iface);
1032 		}
1033 		break;
1034 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1035 	}
1036 
1037 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1038 	dev_data->stats.error_details.rx_crc_errors = heth->Instance->MMCRCRCEPR;
1039 	dev_data->stats.error_details.rx_align_errors = heth->Instance->MMCRAEPR;
1040 #else
1041 	dev_data->stats.error_details.rx_crc_errors = heth->Instance->MMCRFCECR;
1042 	dev_data->stats.error_details.rx_align_errors = heth->Instance->MMCRFAECR;
1043 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1044 
1045 #endif /* CONFIG_NET_STATISTICS_ETHERNET */
1046 }
1047 #elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1048 /* DMA and MAC error callbacks only appear on H7/H5 series */
1049 void HAL_ETH_DMAErrorCallback(ETH_HandleTypeDef *heth_handle)
1050 {
1051 	__ASSERT_NO_MSG(heth_handle != NULL);
1052 
1053 	LOG_ERR("%s errorcode:%x dmaerror:%x",
1054 		__func__,
1055 		HAL_ETH_GetError(heth_handle),
1056 		HAL_ETH_GetDMAError(heth_handle));
1057 
1058 	/* State of eth handle is ERROR in case of unrecoverable error */
1059 	/* unrecoverable (ETH_DMACSR_FBE | ETH_DMACSR_TPS | ETH_DMACSR_RPS) */
1060 	if (HAL_ETH_GetState(heth_handle) == HAL_ETH_STATE_ERROR) {
1061 		LOG_ERR("%s ethernet in error state", __func__);
1062 		/* TODO restart the ETH peripheral to recover */
1063 		return;
1064 	}
1065 
1066 	/* Recoverable errors don't put ETH in error state */
1067 	/* ETH_DMACSR_CDE | ETH_DMACSR_ETI | ETH_DMACSR_RWT */
1068 	/* | ETH_DMACSR_RBU | ETH_DMACSR_AIS) */
1069 
1070 	/* TODO Check if we were transmitting and then unlock the semaphore */
1071 	/* to return the error as soon as possible, else we'll just wait */
1072 	/* for the timeout */
1073 
1074 
1075 }
1076 void HAL_ETH_MACErrorCallback(ETH_HandleTypeDef *heth_handle)
1077 {
1078 	__ASSERT_NO_MSG(heth_handle != NULL);
1079 
1080 	/* MAC errors dumping */
1081 	LOG_ERR("%s errorcode:%x macerror:%x",
1082 		__func__,
1083 		HAL_ETH_GetError(heth_handle),
1084 		HAL_ETH_GetMACError(heth_handle));
1085 
1086 	/* State of eth handle is ERROR in case of unrecoverable error */
1087 	if (HAL_ETH_GetState(heth_handle) == HAL_ETH_STATE_ERROR) {
1088 		LOG_ERR("%s ethernet in error state", __func__);
1089 		/* TODO restart or reconfig ETH peripheral to recover */
1090 
1091 		return;
1092 	}
1093 }
1094 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
1095 
1096 void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth_handle)
1097 {
1098 	__ASSERT_NO_MSG(heth_handle != NULL);
1099 
1100 	struct eth_stm32_hal_dev_data *dev_data =
1101 		CONTAINER_OF(heth_handle, struct eth_stm32_hal_dev_data, heth);
1102 
1103 	__ASSERT_NO_MSG(dev_data != NULL);
1104 
1105 	k_sem_give(&dev_data->rx_int_sem);
1106 }
1107 
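/* Assemble the MAC address from, in order of priority: random generation, devicetree, Kconfig, or a CRC of the unique device ID */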
1108 static void generate_mac(uint8_t *mac_addr)
1109 {
1110 #if defined(ETH_STM32_RANDOM_MAC)
1111 	/* Either CONFIG_ETH_STM32_HAL_RANDOM_MAC or the device tree property */
1112 	/* "zephyr,random-mac-address" is set, so generate a random MAC address */
1113 	gen_random_mac(mac_addr, ST_OUI_B0, ST_OUI_B1, ST_OUI_B2);
1114 #else /* Use user defined mac address */
1115 	mac_addr[0] = ST_OUI_B0;
1116 	mac_addr[1] = ST_OUI_B1;
1117 	mac_addr[2] = ST_OUI_B2;
1118 #if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))
1119 	mac_addr[3] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 3);
1120 	mac_addr[4] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 4);
1121 	mac_addr[5] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 5);
1122 #elif defined(CONFIG_ETH_STM32_HAL_USER_STATIC_MAC)
1123 	mac_addr[3] = CONFIG_ETH_STM32_HAL_MAC3;
1124 	mac_addr[4] = CONFIG_ETH_STM32_HAL_MAC4;
1125 	mac_addr[5] = CONFIG_ETH_STM32_HAL_MAC5;
1126 #else
1127 	uint8_t unique_device_ID_12_bytes[12];
1128 	uint32_t result_mac_32_bits;
1129 
1130 	/* Nothing defined by the user, use device id */
1131 	hwinfo_get_device_id(unique_device_ID_12_bytes, 12);
1132 	result_mac_32_bits = crc32_ieee((uint8_t *)unique_device_ID_12_bytes, 12);
1133 	memcpy(&mac_addr[3], &result_mac_32_bits, 3);
1134 
1135 #endif /* NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))) */
1136 #endif
1137 }
1138 
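/* One-time driver init: enable clocks, configure pins, set the MAC address, init the HAL and start the MAC/DMA */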
1139 static int eth_initialize(const struct device *dev)
1140 {
1141 	struct eth_stm32_hal_dev_data *dev_data;
1142 	const struct eth_stm32_hal_dev_cfg *cfg;
1143 	ETH_HandleTypeDef *heth;
1144 	HAL_StatusTypeDef hal_ret = HAL_OK;
1145 	int ret = 0;
1146 
1147 	__ASSERT_NO_MSG(dev != NULL);
1148 
1149 	dev_data = dev->data;
1150 	cfg = dev->config;
1151 
1152 	__ASSERT_NO_MSG(dev_data != NULL);
1153 	__ASSERT_NO_MSG(cfg != NULL);
1154 
1155 	dev_data->clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE);
1156 
1157 	if (!device_is_ready(dev_data->clock)) {
1158 		LOG_ERR("clock control device not ready");
1159 		return -ENODEV;
1160 	}
1161 
1162 	/* enable clock */
1163 	ret = clock_control_on(dev_data->clock,
1164 		(clock_control_subsys_t)&cfg->pclken);
1165 	ret |= clock_control_on(dev_data->clock,
1166 		(clock_control_subsys_t)&cfg->pclken_tx);
1167 	ret |= clock_control_on(dev_data->clock,
1168 		(clock_control_subsys_t)&cfg->pclken_rx);
1169 #if DT_INST_CLOCKS_HAS_NAME(0, mac_clk_ptp)
1170 	ret |= clock_control_on(dev_data->clock,
1171 		(clock_control_subsys_t)&cfg->pclken_ptp);
1172 #endif
1173 
1174 	if (ret) {
1175 		LOG_ERR("Failed to enable ethernet clock");
1176 		return -EIO;
1177 	}
1178 
1179 	/* configure pinmux */
1180 	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
1181 	if (ret < 0) {
1182 		LOG_ERR("Could not configure ethernet pins");
1183 		return ret;
1184 	}
1185 
1186 	heth = &dev_data->heth;
1187 
1188 	generate_mac(dev_data->mac_addr);
1189 
1190 	heth->Init.MACAddr = dev_data->mac_addr;
1191 
1192 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
1193 	defined(CONFIG_ETH_STM32_HAL_API_V2)
1194 	heth->Init.TxDesc = dma_tx_desc_tab;
1195 	heth->Init.RxDesc = dma_rx_desc_tab;
1196 	heth->Init.RxBuffLen = ETH_STM32_RX_BUF_SIZE;
1197 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
1198 
1199 	hal_ret = HAL_ETH_Init(heth);
1200 	if (hal_ret == HAL_TIMEOUT) {
1201 		/* HAL Init timed out. This could be linked to */
1202 		/* a recoverable error. Log the issue and continue */
1203 		/* driver initialisation */
1204 		LOG_ERR("HAL_ETH_Init Timed out");
1205 	} else if (hal_ret != HAL_OK) {
1206 		LOG_ERR("HAL_ETH_Init failed: %d", hal_ret);
1207 		return -EINVAL;
1208 	}
1209 
1210 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
1211 	/* Enable timestamping of RX packets. We enable all packets to be
1212 	 * timestamped to cover both IEEE 1588 and gPTP.
1213 	 */
1214 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1215 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSENALL;
1216 #else
1217 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSARFE;
1218 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1219 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
1220 
1221 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
1222 	defined(CONFIG_ETH_STM32_HAL_API_V2)
1223 	/* Tx config init: */
1224 	memset(&tx_config, 0, sizeof(ETH_TxPacketConfig));
1225 	tx_config.Attributes = ETH_TX_PACKETS_FEATURES_CSUM |
1226 				ETH_TX_PACKETS_FEATURES_CRCPAD;
1227 	tx_config.ChecksumCtrl = IS_ENABLED(CONFIG_ETH_STM32_HW_CHECKSUM) ?
1228 			ETH_CHECKSUM_IPHDR_PAYLOAD_INSERT_PHDR_CALC : ETH_CHECKSUM_DISABLE;
1229 	tx_config.CRCPadCtrl = ETH_CRC_PAD_INSERT;
1230 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
1231 
1232 	dev_data->link_up = false;
1233 
1234 	/* Initialize semaphores */
1235 	k_mutex_init(&dev_data->tx_mutex);
1236 	k_sem_init(&dev_data->rx_int_sem, 0, K_SEM_MAX_LIMIT);
1237 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
1238 	defined(CONFIG_ETH_STM32_HAL_API_V2)
1239 	k_sem_init(&dev_data->tx_int_sem, 0, K_SEM_MAX_LIMIT);
1240 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
1241 
1242 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
1243 	defined(CONFIG_ETH_STM32_HAL_API_V2)
1244 	/* Adjust MDC clock range depending on HCLK frequency: */
1245 	HAL_ETH_SetMDIOClockRange(heth);
1246 
1247 	/* @TODO: read duplex mode and speed from PHY and set it to ETH */
1248 
1249 	ETH_MACConfigTypeDef mac_config;
1250 
1251 	HAL_ETH_GetMACConfig(heth, &mac_config);
1252 	mac_config.DuplexMode = IS_ENABLED(CONFIG_ETH_STM32_MODE_HALFDUPLEX) ?
1253 				      ETH_HALFDUPLEX_MODE : ETH_FULLDUPLEX_MODE;
1254 	mac_config.Speed = IS_ENABLED(CONFIG_ETH_STM32_SPEED_10M) ?
1255 				 ETH_SPEED_10M : ETH_SPEED_100M;
1256 	hal_ret = HAL_ETH_SetMACConfig(heth, &mac_config);
1257 	if (hal_ret != HAL_OK) {
1258 		LOG_ERR("HAL_ETH_SetMACConfig: failed: %d", hal_ret);
1259 	}
1260 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
1261 
1262 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
1263 
1264 	/* prepare tx buffer header */
1265 	for (uint16_t i = 0; i < ETH_TXBUFNB; ++i) {
1266 		dma_tx_buffer_header[i].tx_buff.buffer = dma_tx_buffer[i];
1267 	}
1268 
1269 	hal_ret = HAL_ETH_Start_IT(heth);
1270 #elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1271 	for (uint32_t i = 0; i < ETH_RX_DESC_CNT; i++) {
1272 		hal_ret = HAL_ETH_DescAssignMemory(heth, i, dma_rx_buffer[i],
1273 						   NULL);
1274 		if (hal_ret != HAL_OK) {
1275 			LOG_ERR("HAL_ETH_DescAssignMemory: failed: %d, i: %d",
1276 				hal_ret, i);
1277 			return -EINVAL;
1278 		}
1279 	}
1280 
1281 	hal_ret = HAL_ETH_Start_IT(heth);
1282 #else
1283 	HAL_ETH_DMATxDescListInit(heth, dma_tx_desc_tab,
1284 		&dma_tx_buffer[0][0], ETH_TXBUFNB);
1285 	HAL_ETH_DMARxDescListInit(heth, dma_rx_desc_tab,
1286 		&dma_rx_buffer[0][0], ETH_RXBUFNB);
1287 
1288 	hal_ret = HAL_ETH_Start(heth);
1289 #endif /* CONFIG_ETH_STM32_HAL_API_V2 */
1290 
1291 	if (hal_ret != HAL_OK) {
1292 		LOG_ERR("HAL_ETH_Start{_IT} failed");
1293 	}
1294 
1295 	setup_mac_filter(heth);
1296 
1297 
1298 
1299 	LOG_DBG("MAC %02x:%02x:%02x:%02x:%02x:%02x",
1300 		dev_data->mac_addr[0], dev_data->mac_addr[1],
1301 		dev_data->mac_addr[2], dev_data->mac_addr[3],
1302 		dev_data->mac_addr[4], dev_data->mac_addr[5]);
1303 
1304 	return 0;
1305 }
1306 
1307 #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
1308 
1309 #if defined(CONFIG_NET_NATIVE_IPV6)
1310 static void add_ipv6_multicast_addr(const struct in6_addr *addr)
1311 {
1312 	uint32_t i;
1313 
1314 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1315 		if (net_ipv6_is_addr_unspecified(&multicast_ipv6_joined_addrs[i])) {
1316 			net_ipv6_addr_copy_raw((uint8_t *)&multicast_ipv6_joined_addrs[i],
1317 					(uint8_t *)addr);
1318 			break;
1319 		}
1320 	}
1321 }
1322 
1323 static void remove_ipv6_multicast_addr(const struct in6_addr *addr)
1324 {
1325 	uint32_t i;
1326 
1327 	for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1328 		if (net_ipv6_addr_cmp(&multicast_ipv6_joined_addrs[i], addr)) {
1329 			net_ipv6_addr_copy_raw((uint8_t *)&multicast_ipv6_joined_addrs[i],
1330 					(uint8_t *)net_ipv6_unspecified_address);
1331 			break;
1332 		}
1333 	}
1334 }
1335 #endif /* CONFIG_NET_NATIVE_IPV6 */
1336 
1337 #if defined(CONFIG_NET_NATIVE_IPV4)
1338 static void add_ipv4_multicast_addr(const struct in_addr *addr)
1339 {
1340 	uint32_t i;
1341 
1342 	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
1343 		if (net_ipv4_is_addr_unspecified(&multicast_ipv4_joined_addrs[i])) {
1344 			net_ipv4_addr_copy_raw((uint8_t *)&multicast_ipv4_joined_addrs[i],
1345 					(uint8_t *)addr);
1346 			break;
1347 		}
1348 	}
1349 }
1350 
1351 static void remove_ipv4_multicast_addr(const struct in_addr *addr)
1352 {
1353 	uint32_t i;
1354 
1355 	for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
1356 		if (net_ipv4_addr_cmp(&multicast_ipv4_joined_addrs[i], addr)) {
1357 			multicast_ipv4_joined_addrs[i].s_addr = 0;
1358 			break;
1359 		}
1360 	}
1361 }
1362 #endif /* CONFIG_NET_NATIVE_IPV4 */
1363 
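/* Reverse the bit order of a 32-bit value; used when deriving the hash filter index from the CRC of a MAC address */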
1364 static uint32_t reverse(uint32_t val)
1365 {
1366 	uint32_t res = 0;
1367 	int i;
1368 
1369 	for (i = 0; i < 32; i++) {
1370 		if (val & BIT(i)) {
1371 			res |= BIT(31 - i);
1372 		}
1373 	}
1374 
1375 	return res;
1376 }
1377 
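/* Multicast monitor callback: recompute the MAC hash filter registers when a multicast group is joined or left */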
1378 static void net_if_stm32_mcast_cb(struct net_if *iface,
1379 			    const struct net_addr *addr,
1380 			    bool is_joined)
1381 {
1382 	ARG_UNUSED(addr);
1383 
1384 	const struct device *dev;
1385 	struct eth_stm32_hal_dev_data *dev_data;
1386 	ETH_HandleTypeDef *heth;
1387 	struct net_eth_addr mac_addr;
1388 	uint32_t crc;
1389 	uint32_t hash_table[2];
1390 	uint32_t hash_index;
1391 	int i;
1392 
1393 	dev = net_if_get_device(iface);
1394 
1395 	dev_data = (struct eth_stm32_hal_dev_data *)dev->data;
1396 
1397 	heth = &dev_data->heth;
1398 
1399 	hash_table[0] = 0;
1400 	hash_table[1] = 0;
1401 
1402 	if (is_joined) {
1403 		/* Save a copy of the hash table, which we update with
1404 		 * the hash for the single multicast address being joined
1405 		 */
1406 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1407 		hash_table[0] = heth->Instance->MACHT0R;
1408 		hash_table[1] = heth->Instance->MACHT1R;
1409 #else
1410 		hash_table[0] = heth->Instance->MACHTLR;
1411 		hash_table[1] = heth->Instance->MACHTHR;
1412 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1413 	}
1414 
1415 	k_mutex_lock(&multicast_addr_lock, K_FOREVER);
1416 
1417 #if defined(CONFIG_NET_NATIVE_IPV6)
1418 	if (is_joined) {
1419 		/* When joining only update the hash filter with the joining
1420 		 * multicast address.
1421 		 */
1422 		add_ipv6_multicast_addr(&addr->in6_addr);
1423 
1424 		net_eth_ipv6_mcast_to_mac_addr(&addr->in6_addr, &mac_addr);
1425 		crc = reverse(crc32_ieee(mac_addr.addr,
1426 					sizeof(struct net_eth_addr)));
1427 		hash_index = (crc >> 26) & 0x3f;
1428 		hash_table[hash_index / 32] |= (1 << (hash_index % 32));
1429 	} else {
1430 		/* When leaving, it's better to compute the full hash table
1431 		 * for all the multicast addresses that we're aware of.
1432 		 */
1433 		remove_ipv6_multicast_addr(&addr->in6_addr);
1434 
1435 		for (i = 0; i < NET_IF_MAX_IPV6_MADDR; i++) {
1436 			if (net_ipv6_is_addr_unspecified(&multicast_ipv6_joined_addrs[i])) {
1437 				continue;
1438 			}
1439 
1440 			net_eth_ipv6_mcast_to_mac_addr(&multicast_ipv6_joined_addrs[i],
1441 							&mac_addr);
1442 			crc = reverse(crc32_ieee(mac_addr.addr,
1443 						sizeof(struct net_eth_addr)));
1444 			hash_index = (crc >> 26) & 0x3f;
1445 			hash_table[hash_index / 32] |= (1 << (hash_index % 32));
1446 		}
1447 	}
1448 #endif /* CONFIG_NET_NATIVE_IPV6 */
1449 
1450 #if defined(CONFIG_NET_NATIVE_IPV4)
1451 	if (is_joined) {
1452 		/* When joining only update the hash filter with the joining
1453 		 * multicast address.
1454 		 */
1455 		add_ipv4_multicast_addr(&addr->in_addr);
1456 
1457 		net_eth_ipv4_mcast_to_mac_addr(&addr->in_addr, &mac_addr);
1458 		crc = reverse(crc32_ieee(mac_addr.addr,
1459 						sizeof(struct net_eth_addr)));
1460 		hash_index = (crc >> 26) & 0x3f;
1461 		hash_table[hash_index / 32] |= (1 << (hash_index % 32));
1462 	} else {
1463 		/* When leaving, it's better to compute the full hash table
1464 		 * for all the multicast addresses that we're aware of.
1465 		 */
1466 		remove_ipv4_multicast_addr(&addr->in_addr);
1467 
1468 		for (i = 0; i < NET_IF_MAX_IPV4_MADDR; i++) {
1469 			if (net_ipv4_is_addr_unspecified(&multicast_ipv4_joined_addrs[i])) {
1470 				continue;
1471 			}
1472 
1473 			net_eth_ipv4_mcast_to_mac_addr(&multicast_ipv4_joined_addrs[i],
1474 							&mac_addr);
1475 			crc = reverse(crc32_ieee(mac_addr.addr,
1476 						sizeof(struct net_eth_addr)));
1477 			hash_index = (crc >> 26) & 0x3f;
1478 			hash_table[hash_index / 32] |= (1 << (hash_index % 32));
1479 		}
1480 	}
1481 #endif /* CONFIG_NET_NATIVE_IPV4 */
1482 
1483 	k_mutex_unlock(&multicast_addr_lock);
1484 
1485 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1486 	heth->Instance->MACHT0R = hash_table[0];
1487 	heth->Instance->MACHT1R = hash_table[1];
1488 #else
1489 	heth->Instance->MACHTLR = hash_table[0];
1490 	heth->Instance->MACHTHR = hash_table[1];
1491 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1492 }
1493 
1494 #endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
1495 
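/* Network interface init: register the MAC address with the stack and, on first init, enable IRQs and start the RX thread */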
1496 static void eth_iface_init(struct net_if *iface)
1497 {
1498 	const struct device *dev;
1499 	struct eth_stm32_hal_dev_data *dev_data;
1500 	bool is_first_init = false;
1501 
1502 	__ASSERT_NO_MSG(iface != NULL);
1503 
1504 	dev = net_if_get_device(iface);
1505 	__ASSERT_NO_MSG(dev != NULL);
1506 
1507 	dev_data = dev->data;
1508 	__ASSERT_NO_MSG(dev_data != NULL);
1509 
1510 	/* For VLAN, this value is only used to get the correct L2 driver.
1511 	 * The iface pointer in context should contain the main interface
1512 	 * if VLANs are enabled.
1513 	 */
1514 	if (dev_data->iface == NULL) {
1515 		dev_data->iface = iface;
1516 		is_first_init = true;
1517 	}
1518 
1519 #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
1520 	net_if_mcast_mon_register(&mcast_monitor, iface, net_if_stm32_mcast_cb);
1521 #endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
1522 
1523 	/* Register Ethernet MAC Address with the upper layer */
1524 	net_if_set_link_addr(iface, dev_data->mac_addr,
1525 			     sizeof(dev_data->mac_addr),
1526 			     NET_LINK_ETHERNET);
1527 
1528 #if defined(CONFIG_NET_DSA)
1529 	dsa_register_master_tx(iface, &eth_tx);
1530 #endif
1531 
1532 	ethernet_init(iface);
1533 
1534 	net_if_carrier_off(iface);
1535 
1536 	net_lldp_set_lldpdu(iface);
1537 
1538 	if (is_first_init) {
1539 		const struct eth_stm32_hal_dev_cfg *cfg = dev->config;
1540 		/* Now that the iface is setup, we are safe to enable IRQs. */
1541 		__ASSERT_NO_MSG(cfg->config_func != NULL);
1542 		cfg->config_func();
1543 
1544 		/* Start the RX interrupt/poll thread */
1545 		k_thread_create(&dev_data->rx_thread, dev_data->rx_thread_stack,
1546 				K_KERNEL_STACK_SIZEOF(dev_data->rx_thread_stack),
1547 				rx_thread, (void *) dev, NULL, NULL,
1548 				K_PRIO_COOP(CONFIG_ETH_STM32_HAL_RX_THREAD_PRIO),
1549 				0, K_NO_WAIT);
1550 
1551 		k_thread_name_set(&dev_data->rx_thread, "stm_eth");
1552 	}
1553 }
1554 
1555 static enum ethernet_hw_caps eth_stm32_hal_get_capabilities(const struct device *dev)
1556 {
1557 	ARG_UNUSED(dev);
1558 
1559 	return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T
1560 #if defined(CONFIG_NET_VLAN)
1561 		| ETHERNET_HW_VLAN
1562 #endif
1563 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
1564 		| ETHERNET_PROMISC_MODE
1565 #endif
1566 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
1567 		| ETHERNET_PTP
1568 #endif
1569 #if defined(CONFIG_NET_LLDP)
1570 		| ETHERNET_LLDP
1571 #endif
1572 #if defined(CONFIG_ETH_STM32_HW_CHECKSUM)
1573 		| ETHERNET_HW_RX_CHKSUM_OFFLOAD
1574 		| ETHERNET_HW_TX_CHKSUM_OFFLOAD
1575 #endif
1576 #if defined(CONFIG_NET_DSA)
1577 		| ETHERNET_DSA_MASTER_PORT
1578 #endif
1579 		;
1580 }
1581 
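/* Apply runtime configuration changes: MAC address update and promiscuous mode */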
1582 static int eth_stm32_hal_set_config(const struct device *dev,
1583 				    enum ethernet_config_type type,
1584 				    const struct ethernet_config *config)
1585 {
1586 	int ret = -ENOTSUP;
1587 	struct eth_stm32_hal_dev_data *dev_data;
1588 	ETH_HandleTypeDef *heth;
1589 
1590 	dev_data = dev->data;
1591 	heth = &dev_data->heth;
1592 
1593 	switch (type) {
1594 	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
1595 		memcpy(dev_data->mac_addr, config->mac_address.addr, 6);
1596 		heth->Instance->MACA0HR = (dev_data->mac_addr[5] << 8) |
1597 			dev_data->mac_addr[4];
1598 		heth->Instance->MACA0LR = (dev_data->mac_addr[3] << 24) |
1599 			(dev_data->mac_addr[2] << 16) |
1600 			(dev_data->mac_addr[1] << 8) |
1601 			dev_data->mac_addr[0];
1602 		net_if_set_link_addr(dev_data->iface, dev_data->mac_addr,
1603 				     sizeof(dev_data->mac_addr),
1604 				     NET_LINK_ETHERNET);
1605 		ret = 0;
1606 		break;
1607 	case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
1608 #if defined(CONFIG_NET_PROMISCUOUS_MODE)
1609 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1610 		if (config->promisc_mode) {
1611 			heth->Instance->MACPFR |= ETH_MACPFR_PR;
1612 		} else {
1613 			heth->Instance->MACPFR &= ~ETH_MACPFR_PR;
1614 		}
1615 #else
1616 		if (config->promisc_mode) {
1617 			heth->Instance->MACFFR |= ETH_MACFFR_PM;
1618 		} else {
1619 			heth->Instance->MACFFR &= ~ETH_MACFFR_PM;
1620 		}
1621 #endif  /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1622 		ret = 0;
1623 #endif /* CONFIG_NET_PROMISCUOUS_MODE */
1624 		break;
1625 	default:
1626 		break;
1627 	}
1628 
1629 	return ret;
1630 }
1631 
1632 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
1633 static const struct device *eth_stm32_get_ptp_clock(const struct device *dev)
1634 {
1635 	struct eth_stm32_hal_dev_data *dev_data = dev->data;
1636 
1637 	return dev_data->ptp_clock;
1638 }
1639 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
1640 
1641 #if defined(CONFIG_NET_STATISTICS_ETHERNET)
1642 static struct net_stats_eth *eth_stm32_hal_get_stats(const struct device *dev)
1643 {
1644 	struct eth_stm32_hal_dev_data *dev_data = dev->data;
1645 
1646 	return &dev_data->stats;
1647 }
1648 #endif /* CONFIG_NET_STATISTICS_ETHERNET */
1649 
1650 static const struct ethernet_api eth_api = {
1651 	.iface_api.init = eth_iface_init,
1652 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
1653 	.get_ptp_clock = eth_stm32_get_ptp_clock,
1654 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
1655 	.get_capabilities = eth_stm32_hal_get_capabilities,
1656 	.set_config = eth_stm32_hal_set_config,
1657 #if defined(CONFIG_NET_DSA)
1658 	.send = dsa_tx,
1659 #else
1660 	.send = eth_tx,
1661 #endif
1662 #if defined(CONFIG_NET_STATISTICS_ETHERNET)
1663 	.get_stats = eth_stm32_hal_get_stats,
1664 #endif /* CONFIG_NET_STATISTICS_ETHERNET */
1665 };
1666 
1667 static void eth0_irq_config(void)
1668 {
1669 	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_isr,
1670 		    DEVICE_DT_INST_GET(0), 0);
1671 	irq_enable(DT_INST_IRQN(0));
1672 }
1673 
1674 PINCTRL_DT_INST_DEFINE(0);
1675 
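/*
 * Static configuration for instance 0: the IRQ hook-up routine above, the MAC
 * clock gates taken from the devicetree "clocks" entries (stmmaceth, mac_clk_tx,
 * mac_clk_rx and, when present, mac_clk_ptp) and the pinctrl state for the
 * MII/RMII pins.
 */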
1676 static const struct eth_stm32_hal_dev_cfg eth0_config = {
1677 	.config_func = eth0_irq_config,
1678 	.pclken = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bus),
1679 		   .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bits)},
1680 	.pclken_tx = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bus),
1681 		      .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bits)},
1682 	.pclken_rx = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bus),
1683 		      .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bits)},
1684 #if DT_INST_CLOCKS_HAS_NAME(0, mac_clk_ptp)
1685 	.pclken_ptp = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_ptp, bus),
1686 		       .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_ptp, bits)},
1687 #endif
1688 	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
1689 };
1690 
1691 static struct eth_stm32_hal_dev_data eth0_data = {
1692 	.heth = {
1693 		.Instance = (ETH_TypeDef *)DT_INST_REG_ADDR(0),
1694 		.Init = {
1695 #if !defined(CONFIG_SOC_SERIES_STM32H7X) && !defined(CONFIG_SOC_SERIES_STM32H5X) && \
1696 	!defined(CONFIG_ETH_STM32_HAL_API_V2)
1697 #if defined(CONFIG_ETH_STM32_AUTO_NEGOTIATION_ENABLE)
1698 			.AutoNegotiation = ETH_AUTONEGOTIATION_ENABLE,
1699 #else
1700 			.AutoNegotiation = ETH_AUTONEGOTIATION_DISABLE,
1701 			.Speed = IS_ENABLED(CONFIG_ETH_STM32_SPEED_10M) ?
1702 				 ETH_SPEED_10M : ETH_SPEED_100M,
1703 			.DuplexMode = IS_ENABLED(CONFIG_ETH_STM32_MODE_HALFDUPLEX) ?
1704 				      ETH_MODE_HALFDUPLEX : ETH_MODE_FULLDUPLEX,
1705 #endif /* !CONFIG_ETH_STM32_AUTO_NEGOTIATION_ENABLE */
1706 			.PhyAddress = PHY_ADDR,
1707 			.RxMode = ETH_RXINTERRUPT_MODE,
1708 			.ChecksumMode = IS_ENABLED(CONFIG_ETH_STM32_HW_CHECKSUM) ?
1709 					ETH_CHECKSUM_BY_HARDWARE : ETH_CHECKSUM_BY_SOFTWARE,
1710 #endif /* !CONFIG_SOC_SERIES_STM32H7X && !CONFIG_SOC_SERIES_STM32H5X && !CONFIG_ETH_STM32_HAL_API_V2 */
1711 			.MediaInterface = IS_ENABLED(CONFIG_ETH_STM32_HAL_MII) ?
1712 					  ETH_MEDIA_INTERFACE_MII : ETH_MEDIA_INTERFACE_RMII,
1713 		},
1714 	},
1715 };
1716 
1717 ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_initialize,
1718 		    NULL, &eth0_data, &eth0_config,
1719 		    CONFIG_ETH_INIT_PRIORITY, &eth_api, ETH_STM32_HAL_MTU);
1720 
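/*
 * PTP clock driver: the functions below expose the MAC's IEEE 1588 timestamping
 * unit through the ptp_clock driver API (set/get/adjust/rate_adjust) and
 * register it as a separate device.
 */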
1721 #if defined(CONFIG_PTP_CLOCK_STM32_HAL)
1722 
1723 struct ptp_context {
1724 	struct eth_stm32_hal_dev_data *eth_dev_data;
1725 };
1726 
1727 static struct ptp_context ptp_stm32_0_context;
1728 
1729 static int ptp_clock_stm32_set(const struct device *dev,
1730 			      struct net_ptp_time *tm)
1731 {
1732 	struct ptp_context *ptp_context = dev->data;
1733 	struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
1734 	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
1735 	unsigned int key;
1736 
1737 	key = irq_lock();
1738 
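	/*
	 * Both register layouts follow the same sequence: load the update
	 * registers with the new time, request initialization (TSINIT/TSSTI)
	 * and busy-wait until the hardware clears the request bit.
	 */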
1739 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1740 	heth->Instance->MACSTSUR = tm->second;
1741 	heth->Instance->MACSTNUR = tm->nanosecond;
1742 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSINIT;
1743 	while (heth->Instance->MACTSCR & ETH_MACTSCR_TSINIT_Msk) {
1744 		/* busy-wait until the hardware clears TSINIT */
1745 	}
1746 #else
1747 	heth->Instance->PTPTSHUR = tm->second;
1748 	heth->Instance->PTPTSLUR = tm->nanosecond;
1749 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTI;
1750 	while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTI_Msk) {
1751 		/* busy-wait until the hardware clears TSSTI */
1752 	}
1753 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1754 
1755 	irq_unlock(key);
1756 
1757 	return 0;
1758 }
1759 
1760 static int ptp_clock_stm32_get(const struct device *dev,
1761 			      struct net_ptp_time *tm)
1762 {
1763 	struct ptp_context *ptp_context = dev->data;
1764 	struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
1765 	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
1766 	unsigned int key;
1767 	uint32_t second_2;
1768 
1769 	key = irq_lock();
1770 
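	/*
	 * The seconds register is read twice; if it changed between the two
	 * reads, the nanosecond value may have been captured across a second
	 * boundary, which is corrected for below.
	 */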
1771 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1772 	tm->second = heth->Instance->MACSTSR;
1773 	tm->nanosecond = heth->Instance->MACSTNR;
1774 	second_2 = heth->Instance->MACSTSR;
1775 #else
1776 	tm->second = heth->Instance->PTPTSHR;
1777 	tm->nanosecond = heth->Instance->PTPTSLR;
1778 	second_2 = heth->Instance->PTPTSHR;
1779 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1780 
1781 	irq_unlock(key);
1782 
1783 	if (tm->second != second_2 && tm->nanosecond < NSEC_PER_SEC / 2) {
1784 		/* A second rollover occurred during the first read: the seconds register
1785 		 * was read before the second boundary and the nanoseconds register after it.
1786 		 * Use second_2 as the new seconds value.
1787 		 */
1788 		tm->second = second_2;
1789 	}
1790 
1791 	return 0;
1792 }
1793 
1794 static int ptp_clock_stm32_adjust(const struct device *dev, int increment)
1795 {
1796 	struct ptp_context *ptp_context = dev->data;
1797 	struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
1798 	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
1799 	int key, ret;
1800 
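	/*
	 * Offsets are applied through the time-update registers and must stay
	 * within +/- one second; negative offsets set the subtract flag
	 * (ADDSUB on H7/H5, TSUPNS otherwise) in the nanoseconds update value.
	 */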
1801 	if ((increment <= (int32_t)(-NSEC_PER_SEC)) ||
1802 			(increment >= (int32_t)NSEC_PER_SEC)) {
1803 		ret = -EINVAL;
1804 	} else {
1805 		key = irq_lock();
1806 
1807 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1808 		heth->Instance->MACSTSUR = 0;
1809 		if (increment >= 0) {
1810 			heth->Instance->MACSTNUR = increment;
1811 		} else {
1812 			heth->Instance->MACSTNUR = ETH_MACSTNUR_ADDSUB | (NSEC_PER_SEC + increment);
1813 		}
1814 		heth->Instance->MACTSCR |= ETH_MACTSCR_TSUPDT;
1815 		while (heth->Instance->MACTSCR & ETH_MACTSCR_TSUPDT_Msk) {
1816 			/* busy-wait until the hardware clears TSUPDT */
1817 		}
1818 #else
1819 		heth->Instance->PTPTSHUR = 0;
1820 		if (increment >= 0) {
1821 			heth->Instance->PTPTSLUR = increment;
1822 		} else {
1823 			heth->Instance->PTPTSLUR = ETH_PTPTSLUR_TSUPNS | (-increment);
1824 		}
1825 		heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTU;
1826 		while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTU_Msk) {
1827 			/* busy-wait until the hardware clears TSSTU */
1828 		}
1829 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1830 
1831 		ret = 0;
1832 		irq_unlock(key);
1833 	}
1834 
1835 	return ret;
1836 }
1837 
1838 static int ptp_clock_stm32_rate_adjust(const struct device *dev, double ratio)
1839 {
1840 	struct ptp_context *ptp_context = dev->data;
1841 	struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data;
1842 	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
1843 	int key, ret;
1844 	uint32_t addend_val;
1845 
1846 	/* No change needed */
1847 	if (ratio == 1.0L) {
1848 		return 0;
1849 	}
1850 
1851 	key = irq_lock();
1852 
1853 	ratio *= (double)eth_dev_data->clk_ratio_adj;
1854 
1855 	/* Limit possible ratio */
1856 	if (ratio * 100 < CONFIG_ETH_STM32_HAL_PTP_CLOCK_ADJ_MIN_PCT ||
1857 			ratio * 100 > CONFIG_ETH_STM32_HAL_PTP_CLOCK_ADJ_MAX_PCT) {
1858 		ret = -EINVAL;
1859 		goto error;
1860 	}
1861 
1862 	/* Save new ratio */
1863 	eth_dev_data->clk_ratio_adj = ratio;
1864 
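	/*
	 * In the fine-correction scheme the addend is accumulated on every PTP
	 * reference clock cycle and each 32-bit overflow advances the subsecond
	 * counter by SSINC, so the effective clock rate scales linearly with the
	 * addend value programmed below.
	 */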
1865 	/* Update addend register */
1866 	addend_val = UINT32_MAX * (double)eth_dev_data->clk_ratio * ratio;
1867 
1868 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1869 	heth->Instance->MACTSAR = addend_val;
1870 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSADDREG;
1871 	while (heth->Instance->MACTSCR & ETH_MACTSCR_TSADDREG_Msk) {
1872 		/* busy-wait until the hardware clears TSADDREG */
1873 	}
1874 #else
1875 	heth->Instance->PTPTSAR = addend_val;
1876 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSARU;
1877 	while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU_Msk) {
1878 		/* busy-wait until the hardware clears TSARU */
1879 	}
1880 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1881 
1882 	ret = 0;
1883 
1884 error:
1885 	irq_unlock(key);
1886 
1887 	return ret;
1888 }
1889 
1890 static const struct ptp_clock_driver_api api = {
1891 	.set = ptp_clock_stm32_set,
1892 	.get = ptp_clock_stm32_get,
1893 	.adjust = ptp_clock_stm32_adjust,
1894 	.rate_adjust = ptp_clock_stm32_rate_adjust,
1895 };
1896 
1897 static int ptp_stm32_init(const struct device *port)
1898 {
1899 	const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(mac));
1900 	struct eth_stm32_hal_dev_data *eth_dev_data = dev->data;
1901 	const struct eth_stm32_hal_dev_cfg *eth_cfg = dev->config;
1902 	struct ptp_context *ptp_context = port->data;
1903 	ETH_HandleTypeDef *heth = &eth_dev_data->heth;
1904 	int ret;
1905 	uint32_t ptp_hclk_rate;
1906 	uint32_t ss_incr_ns;
1907 	uint32_t addend_val;
1908 
1909 	eth_dev_data->ptp_clock = port;
1910 	ptp_context->eth_dev_data = eth_dev_data;
1911 
1912 	/* Mask the Timestamp Trigger interrupt */
1913 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1914 	heth->Instance->MACIER &= ~(ETH_MACIER_TSIE);
1915 #else
1916 	heth->Instance->MACIMR &= ~(ETH_MACIMR_TSTIM);
1917 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1918 
1919 	/* Enable timestamping */
1920 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1921 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSENA;
1922 #else
1923 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSE;
1924 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1925 
1926 	/* Query ethernet clock rate */
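	/*
	 * On STM32H7/H5 the timestamping unit runs from the MAC bus clock
	 * (pclken); other series expose a dedicated mac_clk_ptp gate instead.
	 */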
1927 	ret = clock_control_get_rate(eth_dev_data->clock,
1928 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1929 		(clock_control_subsys_t)&eth_cfg->pclken,
1930 #else
1931 		(clock_control_subsys_t)&eth_cfg->pclken_ptp,
1932 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1933 		&ptp_hclk_rate);
1934 	if (ret) {
1935 		LOG_ERR("Failed to query ethernet clock");
1936 		return -EIO;
1937 	}
1938 
1939 	/* Program the subsecond increment register based on the PTP clock freq */
1940 	if (NSEC_PER_SEC % CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ != 0) {
1941 		LOG_ERR("PTP clock period must be an integer nanosecond value");
1942 		return -EINVAL;
1943 	}
1944 	ss_incr_ns = NSEC_PER_SEC / CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ;
1945 	if (ss_incr_ns > UINT8_MAX) {
1946 		LOG_ERR("PTP clock period is more than %d nanoseconds", UINT8_MAX);
1947 		return -EINVAL;
1948 	}
1949 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1950 	heth->Instance->MACSSIR = ss_incr_ns << ETH_MACMACSSIR_SSINC_Pos;
1951 #else
1952 	heth->Instance->PTPSSIR = ss_incr_ns;
1953 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1954 
1955 	/* Program timestamp addend register */
1956 	eth_dev_data->clk_ratio =
1957 		((double)CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ) / ((double)ptp_hclk_rate);
1958 	/*
1959 	 * clk_ratio is a ratio between desired PTP clock frequency and HCLK rate.
1960 	 * Because HCLK is defined by a physical oscillator, it might drift due
1961 	 * to manufacturing tolerances and environmental effects (e.g. temperature).
1962 	 * clk_ratio_adj compensates for such inaccuracies. It starts off as 1.0
1963 	 * and gets adjusted by calling ptp_clock_stm32_rate_adjust().
1964 	 */
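	/*
	 * Illustrative (hypothetical) numbers: with a 50 MHz PTP source clock
	 * and a 200 MHz bus clock, ss_incr_ns is 20 ns, clk_ratio is 0.25 and
	 * the initial addend comes out to roughly UINT32_MAX / 4 (~0x40000000).
	 */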
1965 	eth_dev_data->clk_ratio_adj = 1.0f;
1966 	addend_val =
1967 		UINT32_MAX * eth_dev_data->clk_ratio * eth_dev_data->clk_ratio_adj;
1968 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1969 	heth->Instance->MACTSAR = addend_val;
1970 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSADDREG;
1971 	while (heth->Instance->MACTSCR & ETH_MACTSCR_TSADDREG_Msk) {
1972 		k_yield();
1973 	}
1974 #else
1975 	heth->Instance->PTPTSAR = addend_val;
1976 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSARU;
1977 	while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU_Msk) {
1978 		k_yield();
1979 	}
1980 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1981 
1982 	/* Enable fine timestamp correction method */
1983 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1984 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSCFUPDT;
1985 #else
1986 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSFCU;
1987 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1988 
1989 	/* Enable nanosecond rollover into a new second */
1990 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1991 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSCTRLSSR;
1992 #else
1993 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSSR;
1994 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
1995 
1996 	/* Initialize timestamp */
1997 #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
1998 	heth->Instance->MACSTSUR = 0;
1999 	heth->Instance->MACSTNUR = 0;
2000 	heth->Instance->MACTSCR |= ETH_MACTSCR_TSINIT;
2001 	while (heth->Instance->MACTSCR & ETH_MACTSCR_TSINIT_Msk) {
2002 		k_yield();
2003 	}
2004 #else
2005 	heth->Instance->PTPTSHUR = 0;
2006 	heth->Instance->PTPTSLUR = 0;
2007 	heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTI;
2008 	while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTI_Msk) {
2009 		k_yield();
2010 	}
2011 #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
2012 
2013 #if defined(CONFIG_ETH_STM32_HAL_API_V2)
2014 	/* Mark the HAL PTP configuration as complete */
2015 	heth->IsPtpConfigured = HAL_ETH_PTP_CONFIGURATED;
2016 #endif
2017 
2018 	return 0;
2019 }
2020 
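/*
 * The PTP clock is registered as its own device; its init priority is expected
 * to be later than CONFIG_ETH_INIT_PRIORITY so that ptp_stm32_init() runs after
 * the MAC has been initialized.
 */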
2021 DEVICE_DEFINE(stm32_ptp_clock_0, PTP_CLOCK_NAME, ptp_stm32_init,
2022 		NULL, &ptp_stm32_0_context, NULL, POST_KERNEL,
2023 		CONFIG_ETH_STM32_HAL_PTP_CLOCK_INIT_PRIO, &api);
2024 
2025 #endif /* CONFIG_PTP_CLOCK_STM32_HAL */
2026