/*
 * Xilinx Processor System Gigabit Ethernet controller (GEM) driver
 *
 * Copyright (c) 2021, Weidmueller Interface GmbH & Co. KG
 * SPDX-License-Identifier: Apache-2.0
 *
 * Known current limitations / TODOs:
 * - Only supports 32-bit addresses in buffer descriptors, therefore
 *   the ZynqMP APU (Cortex-A53 cores) may not be fully supported.
 * - Hardware timestamps not considered.
 * - VLAN tags not considered.
 * - Wake-on-LAN interrupt not supported.
 * - Send function is not SMP-capable (due to single TX done semaphore).
 * - Interrupt-driven PHY management not supported - polling only.
 * - No explicit placement of the DMA memory area(s) in either a
 *   specific memory section or at a fixed memory location yet. This
 *   is not an issue as long as the controller is used in conjunction
 *   with the Cortex-R5 QEMU target or an actual R5 running without the
 *   MPU enabled.
 * - No detailed error handling when evaluating the Interrupt Status,
 *   RX Status and TX Status registers.
 */
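
/*
 * Minimal application-side bring-up sketch (for illustration only, not
 * part of this driver; which interface ends up being the default one is
 * a board-level assumption). Once this driver has initialized the
 * interface, an application using the standard Zephyr net_if API might
 * simply do:
 *
 *   struct net_if *iface = net_if_get_default();
 *
 *   if (iface != NULL) {
 *       net_if_up(iface);
 *   }
 */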

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <ethernet/eth_stats.h>

#include "eth_xlnx_gem_priv.h"

#define LOG_MODULE_NAME eth_xlnx_gem
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

static int  eth_xlnx_gem_dev_init(const struct device *dev);
static void eth_xlnx_gem_iface_init(struct net_if *iface);
static void eth_xlnx_gem_isr(const struct device *dev);
static int  eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt);
static int  eth_xlnx_gem_start_device(const struct device *dev);
static int  eth_xlnx_gem_stop_device(const struct device *dev);
static enum ethernet_hw_caps
	eth_xlnx_gem_get_capabilities(const struct device *dev);
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev);
#endif

static void eth_xlnx_gem_reset_hw(const struct device *dev);
static void eth_xlnx_gem_configure_clocks(const struct device *dev);
static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev);
static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev);
static void eth_xlnx_gem_set_mac_address(const struct device *dev);
static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev);
static void eth_xlnx_gem_init_phy(const struct device *dev);
static void eth_xlnx_gem_poll_phy(struct k_work *item);
static void eth_xlnx_gem_configure_buffers(const struct device *dev);
static void eth_xlnx_gem_rx_pending_work(struct k_work *item);
static void eth_xlnx_gem_handle_rx_pending(const struct device *dev);
static void eth_xlnx_gem_tx_done_work(struct k_work *item);
static void eth_xlnx_gem_handle_tx_done(const struct device *dev);

static const struct ethernet_api eth_xlnx_gem_apis = {
	.iface_api.init   = eth_xlnx_gem_iface_init,
	.get_capabilities = eth_xlnx_gem_get_capabilities,
	.send		  = eth_xlnx_gem_send,
	.start		  = eth_xlnx_gem_start_device,
	.stop		  = eth_xlnx_gem_stop_device,
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	.get_stats	  = eth_xlnx_gem_stats,
#endif
};

/*
 * Insert the configuration & run-time data for all GEM instances which
 * are enabled in the device tree of the current target board.
 */
DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INITIALIZE)

/**
 * @brief GEM device initialization function
 * Initializes the GEM itself, the DMA memory area used by the GEM and,
 * if enabled, an associated PHY attached to the GEM's MDIO interface.
 *
 * @param dev Pointer to the device data
 * @retval 0 if the device initialization completed successfully
 */
static int eth_xlnx_gem_dev_init(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val;

	/* Precondition checks using assertions */

	/* Valid PHY address and polling interval, if PHY is to be managed */
	if (dev_conf->init_phy) {
		__ASSERT((dev_conf->phy_mdio_addr_fix >= 0 &&
			 dev_conf->phy_mdio_addr_fix <= 32),
			 "%s invalid PHY address %u, must be in range "
			 "1 to 32, or 0 for auto-detection",
			 dev->name, dev_conf->phy_mdio_addr_fix);
		__ASSERT(dev_conf->phy_poll_interval > 0,
			 "%s has an invalid zero PHY status polling "
			 "interval", dev->name);
	}

	/* Valid max. / nominal link speed value */
	__ASSERT((dev_conf->max_link_speed == LINK_10MBIT ||
		 dev_conf->max_link_speed == LINK_100MBIT ||
		 dev_conf->max_link_speed == LINK_1GBIT),
		 "%s invalid max./nominal link speed value %u",
		 dev->name, (uint32_t)dev_conf->max_link_speed);

	/* MDC clock divider validity check, SoC dependent */
#if defined(CONFIG_SOC_XILINX_ZYNQMP)
	__ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_48,
		 "%s invalid MDC clock divider value %u, must be in "
		 "range 0 to %u", dev->name, dev_conf->mdc_divider,
		 (uint32_t)MDC_DIVIDER_48);
#elif defined(CONFIG_SOC_FAMILY_XILINX_ZYNQ7000)
	__ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_224,
		 "%s invalid MDC clock divider value %u, must be in "
		 "range 0 to %u", dev->name, dev_conf->mdc_divider,
		 (uint32_t)MDC_DIVIDER_224);
#endif

	/* AMBA AHB configuration options */
	__ASSERT((dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_32BIT ||
		 dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_64BIT ||
		 dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_128BIT),
		 "%s AMBA AHB bus width configuration is invalid",
		 dev->name);
	__ASSERT((dev_conf->ahb_burst_length == AHB_BURST_SINGLE ||
		 dev_conf->ahb_burst_length == AHB_BURST_INCR4 ||
		 dev_conf->ahb_burst_length == AHB_BURST_INCR8 ||
		 dev_conf->ahb_burst_length == AHB_BURST_INCR16),
		 "%s AMBA AHB burst length configuration is invalid",
		 dev->name);

	/* HW RX buffer size */
	__ASSERT((dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_8KB ||
		 dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_4KB ||
		 dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_2KB ||
		 dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_1KB),
		 "%s hardware RX buffer size configuration is invalid",
		 dev->name);

	/* HW RX buffer offset */
	__ASSERT(dev_conf->hw_rx_buffer_offset <= 3,
		 "%s hardware RX buffer offset %u is invalid, must be in "
		 "range 0 to 3", dev->name, dev_conf->hw_rx_buffer_offset);

	/*
	 * RX & TX buffer sizes
	 * RX Buffer size must be a multiple of 64, as the size of the
	 * corresponding DMA receive buffer in AHB system memory is
	 * expressed as n * 64 bytes in the DMA configuration register.
	 */
	__ASSERT(dev_conf->rx_buffer_size % 64 == 0,
		 "%s RX buffer size %u is not a multiple of 64 bytes",
		 dev->name, dev_conf->rx_buffer_size);
	__ASSERT((dev_conf->rx_buffer_size != 0 &&
		 dev_conf->rx_buffer_size <= 16320),
		 "%s RX buffer size %u is invalid, must be non-zero "
		 "and 16320 bytes at most", dev->name,
		 dev_conf->rx_buffer_size);
	__ASSERT((dev_conf->tx_buffer_size != 0 &&
		 dev_conf->tx_buffer_size <= 16380),
		 "%s TX buffer size %u is invalid, must be non-zero "
		 "and 16380 bytes at most", dev->name,
		 dev_conf->tx_buffer_size);

	/* Checksum offloading limitations of the QEMU GEM implementation */
#ifdef CONFIG_QEMU_TARGET
	__ASSERT(!dev_conf->enable_rx_chksum_offload,
		 "TCP/UDP/IP hardware checksum offloading is not "
		 "supported by the QEMU GEM implementation");
	__ASSERT(!dev_conf->enable_tx_chksum_offload,
		 "TCP/UDP/IP hardware checksum offloading is not "
		 "supported by the QEMU GEM implementation");
#endif

	/*
	 * Initialization procedure as described in the Zynq-7000 TRM,
	 * chapter 16.3.x.
	 */
	eth_xlnx_gem_reset_hw(dev);		/* Chapter 16.3.1 */
	eth_xlnx_gem_set_initial_nwcfg(dev);	/* Chapter 16.3.2 */
	eth_xlnx_gem_set_mac_address(dev);	/* Chapter 16.3.2 */
	eth_xlnx_gem_set_initial_dmacr(dev);	/* Chapter 16.3.2 */

	/* Enable MDIO -> set gem.net_ctrl[mgmt_port_en] */
	if (dev_conf->init_phy) {
		reg_val  = sys_read32(dev_conf->base_addr +
				      ETH_XLNX_GEM_NWCTRL_OFFSET);
		reg_val |= ETH_XLNX_GEM_NWCTRL_MDEN_BIT;
		sys_write32(reg_val, dev_conf->base_addr +
			    ETH_XLNX_GEM_NWCTRL_OFFSET);
	}

	eth_xlnx_gem_configure_clocks(dev);	/* Chapter 16.3.3 */
	if (dev_conf->init_phy) {
		eth_xlnx_gem_init_phy(dev);	/* Chapter 16.3.4 */
	}
	eth_xlnx_gem_configure_buffers(dev);	/* Chapter 16.3.5 */

	return 0;
}

/**
 * @brief GEM associated interface initialization function
 * Initializes the interface associated with a GEM device.
 *
 * @param iface Pointer to the associated interface data struct
 */
static void eth_xlnx_gem_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	/* Set the initial contents of the current instance's run-time data */
	dev_data->iface = iface;
	net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET);
	ethernet_init(iface);
	net_if_carrier_off(iface);

	/*
	 * Initialize the (delayed) work items for RX pending, TX done
	 * and PHY status polling handlers
	 */
	k_work_init(&dev_data->tx_done_work, eth_xlnx_gem_tx_done_work);
	k_work_init(&dev_data->rx_pend_work, eth_xlnx_gem_rx_pending_work);
	k_work_init_delayable(&dev_data->phy_poll_delayed_work,
			      eth_xlnx_gem_poll_phy);

	/* Initialize TX completion semaphore */
	k_sem_init(&dev_data->tx_done_sem, 0, 1);

	/*
	 * Initialize semaphores in the RX/TX BD rings which have not
	 * yet been initialized
	 */
	k_sem_init(&dev_data->txbd_ring.ring_sem, 1, 1);
	/* RX BD ring semaphore is not required at the time being */

	/* Initialize the device's interrupt */
	dev_conf->config_func(dev);

	/* Submit initial PHY status polling delayed work */
	k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
}

/**
 * @brief GEM interrupt service routine
 * GEM interrupt service routine. Checks for indications of errors
 * and either immediately handles RX pending / TX complete notifications
 * or defers them to the system work queue.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_isr(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	/* Read the interrupt status register */
	reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);

	/*
	 * TODO: handling if one or more error flag(s) are set in the
	 * interrupt status register. -> For now, just log them
	 */
	if (reg_val & ETH_XLNX_GEM_IXR_ERRORS_MASK) {
		LOG_ERR("%s error bit(s) set in Interrupt Status Reg.: 0x%08X",
			dev->name, reg_val);
	}

	/*
	 * Check for the following indications by the controller:
	 * reg_val & 0x00000080 -> gem.intr_status bit [7] = Frame TX complete
	 * reg_val & 0x00000002 -> gem.intr_status bit [1] = Frame received
	 * comp. Zynq-7000 TRM, Chapter B.18, p. 1289/1290.
	 * If the respective condition's handling is configured to be deferred
	 * to the work queue thread, submit the corresponding job to the work
	 * queue, otherwise, handle the condition immediately.
	 */
	if ((reg_val & ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT) != 0) {
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
		if (dev_conf->defer_txd_to_queue) {
			k_work_submit(&dev_data->tx_done_work);
		} else {
			eth_xlnx_gem_handle_tx_done(dev);
		}
	}
	if ((reg_val & ETH_XLNX_GEM_IXR_FRAME_RX_BIT) != 0) {
		sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
		sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
		if (dev_conf->defer_rxp_to_queue) {
			k_work_submit(&dev_data->rx_pend_work);
		} else {
			eth_xlnx_gem_handle_rx_pending(dev);
		}
	}

	/*
	 * Clear all interrupt status bits so that the interrupt is de-asserted
	 * by the GEM. -> TXSR/RXSR are read/cleared by either
	 * eth_xlnx_gem_handle_tx_done or eth_xlnx_gem_handle_rx_pending if
	 * those actions are not deferred to the system's work queue for the
	 * current interface. If the latter is the case, those registers will
	 * be read/cleared whenever the corresponding work item submitted from
	 * within this ISR is being processed.
	 */
	sys_write32((0xFFFFFFFF & ~(ETH_XLNX_GEM_IXR_FRAME_RX_BIT |
		    ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT)),
		    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);
}

/**
 * @brief GEM data send function
 * GEM data send function. Blocks until a TX complete notification has been
 * received & processed.
 *
 * @param dev Pointer to the device data
 * @param pkt Pointer to the data packet to be sent
 * @retval -EINVAL in case of invalid parameters, e.g. zero data length
 * @retval -EIO in case of:
 *         (1) the attempt to TX data while the device is stopped,
 *             the interface is down or the link is down,
 *         (2) the attempt to TX data while no free buffers are available
 *             in the DMA memory area,
 *         (3) the transmission completion notification timing out
 * @retval 0 if the packet was transmitted successfully
 */
static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	uint16_t tx_data_length;
	uint16_t tx_data_remaining;
	void *tx_buffer_offs;

	uint8_t bds_reqd;
	uint8_t curr_bd_idx;
	uint8_t first_bd_idx;

	uint32_t reg_ctrl;
	uint32_t reg_val;
	int sem_status;

	if (!dev_data->started || dev_data->eff_link_speed == LINK_DOWN ||
			(!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) {
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.tx_dropped++;
#endif
		return -EIO;
	}

	tx_data_length = tx_data_remaining = net_pkt_get_len(pkt);
	if (tx_data_length == 0) {
		LOG_ERR("%s cannot TX, zero packet length", dev->name);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.errors.tx++;
#endif
		return -EINVAL;
	}

	/*
	 * Check if enough buffer descriptors are available for the amount
	 * of data to be transmitted, update the free BD count if this is
	 * the case. Update the 'next to use' BD index in the TX BD ring if
	 * sufficient space is available. If TX done handling, where the BD
	 * ring's data is accessed as well, is performed via the system work
	 * queue, protect against interruptions during the update of the BD
	 * ring's data by taking the ring's semaphore. If TX done handling
	 * is performed within the ISR, protect against interruptions by
	 * disabling the TX done interrupt source.
	 */
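	/*
	 * Ceiling division below, worked example (the buffer size is a
	 * hypothetical value): a 1514 byte frame with a TX buffer size
	 * of 512 bytes requires (1514 + 511) / 512 = 3 BDs.
	 */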
	bds_reqd = (uint8_t)((tx_data_length + (dev_conf->tx_buffer_size - 1)) /
		   dev_conf->tx_buffer_size);

	if (dev_conf->defer_txd_to_queue) {
		k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER);
	} else {
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
	}

	if (bds_reqd > dev_data->txbd_ring.free_bds) {
		LOG_ERR("%s cannot TX, packet length %hu requires "
			"%hhu BDs, current free count = %hhu",
			dev->name, tx_data_length, bds_reqd,
			dev_data->txbd_ring.free_bds);

		if (dev_conf->defer_txd_to_queue) {
			k_sem_give(&(dev_data->txbd_ring.ring_sem));
		} else {
			sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
				    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
		}
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.tx_dropped++;
#endif
		return -EIO;
	}

	curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_use;
	reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);

	dev_data->txbd_ring.next_to_use = (first_bd_idx + bds_reqd) %
					  dev_conf->txbd_count;
	dev_data->txbd_ring.free_bds -= bds_reqd;
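	/*
	 * Example of the index wrap-around above (hypothetical values):
	 * with a ring of txbd_count = 32 BDs, first_bd_idx = 30 and
	 * bds_reqd = 4, next_to_use becomes (30 + 4) % 32 = 2.
	 */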

	if (dev_conf->defer_txd_to_queue) {
		k_sem_give(&(dev_data->txbd_ring.ring_sem));
	} else {
		sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
			    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
	}

	/*
	 * Scatter the contents of the network packet's buffer to
	 * one or more DMA buffers.
	 */
	net_pkt_cursor_init(pkt);
	do {
		/* Calculate the base pointer of the target TX buffer */
		tx_buffer_offs = (void *)(dev_data->first_tx_buffer +
				 (dev_conf->tx_buffer_size * curr_bd_idx));

		/* Copy packet data to DMA buffer */
		net_pkt_read(pkt, (void *)tx_buffer_offs,
			     (tx_data_remaining < dev_conf->tx_buffer_size) ?
			     tx_data_remaining : dev_conf->tx_buffer_size);

		/* Update current BD's control word */
		reg_val = sys_read32(reg_ctrl) & (ETH_XLNX_GEM_TXBD_WRAP_BIT |
			  ETH_XLNX_GEM_TXBD_USED_BIT);
		reg_val |= (tx_data_remaining < dev_conf->tx_buffer_size) ?
			   tx_data_remaining : dev_conf->tx_buffer_size;
		sys_write32(reg_val, reg_ctrl);

		if (tx_data_remaining > dev_conf->tx_buffer_size) {
			/* Switch to next BD */
			curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count;
			reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
		}

		tx_data_remaining -= (tx_data_remaining < dev_conf->tx_buffer_size) ?
				     tx_data_remaining : dev_conf->tx_buffer_size;
	} while (tx_data_remaining > 0);

	/* Set the 'last' bit in the current BD's control word */
	reg_val |= ETH_XLNX_GEM_TXBD_LAST_BIT;

	/*
	 * Clear the 'used' bits of all BDs involved in the current
	 * transmission. In accordance with chapter 16.3.8 of the
	 * Zynq-7000 TRM, the 'used' bits shall be cleared in reverse
	 * order, so that the 'used' bit of the first BD is cleared
	 * last just before the transmission is started.
	 */
	reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT;
	sys_write32(reg_val, reg_ctrl);

	while (curr_bd_idx != first_bd_idx) {
		curr_bd_idx = (curr_bd_idx != 0) ? (curr_bd_idx - 1) :
			      (dev_conf->txbd_count - 1);
		reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
		reg_val = sys_read32(reg_ctrl);
		reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT;
		sys_write32(reg_val, reg_ctrl);
	}

	/* Set the start TX bit in the gem.net_ctrl register */
	reg_val  = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
	reg_val |= ETH_XLNX_GEM_NWCTRL_STARTTX_BIT;
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

#ifdef CONFIG_NET_STATISTICS_ETHERNET
	dev_data->stats.bytes.sent += tx_data_length;
	dev_data->stats.pkts.tx++;
#endif

	/* Block until TX has completed */
	sem_status = k_sem_take(&dev_data->tx_done_sem, K_MSEC(100));
	if (sem_status < 0) {
		LOG_ERR("%s TX confirmation timed out", dev->name);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
		dev_data->stats.tx_timeout_count++;
#endif
		return -EIO;
	}

	return 0;
}

/**
 * @brief GEM device start function
 * GEM device start function. Clears all status registers and any
 * pending interrupts, enables RX and TX, enables interrupts. If
 * no PHY is managed by the current driver instance, this function
 * also declares the physical link up at the configured nominal
 * link speed.
 *
 * @param dev Pointer to the device data
 * @retval    0 upon successful completion
 */
static int eth_xlnx_gem_start_device(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	if (dev_data->started) {
		return 0;
	}
	dev_data->started = true;

	/* Disable & clear all the MAC interrupts */
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);

	/* Clear RX & TX status registers */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	/* RX and TX enable */
	reg_val  = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
	reg_val |= (ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT);
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Enable all the MAC interrupts */
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);

	/* Submit the delayed work for polling the link state */
	if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) == 0) {
		k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT);
	}

	LOG_DBG("%s started", dev->name);
	return 0;
}

/**
 * @brief GEM device stop function
 * GEM device stop function. Disables all interrupts, disables
 * RX and TX, clears all status registers. If no PHY is managed
 * by the current driver instance, this function also declares
 * the physical link down.
 *
 * @param dev Pointer to the device data
 * @retval    0 upon successful completion
 */
static int eth_xlnx_gem_stop_device(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	if (!dev_data->started) {
		return 0;
	}
	dev_data->started = false;

	/* Cancel the delayed work that polls the link state */
	if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) != 0) {
		k_work_cancel_delayable(&dev_data->phy_poll_delayed_work);
	}

	/* RX and TX disable */
	reg_val  = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
	reg_val &= (~(ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT));
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Disable & clear all the MAC interrupts */
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);

	/* Clear RX & TX status registers */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	LOG_DBG("%s stopped", dev->name);
	return 0;
}

/**
 * @brief GEM capability request function
 * Returns the capabilities of the GEM controller as an enumeration.
 * All of the data returned is derived from the device configuration
 * of the current GEM device instance.
 *
 * @param dev Pointer to the device data
 * @return Enumeration containing the current GEM device's capabilities
 */
static enum ethernet_hw_caps eth_xlnx_gem_get_capabilities(
	const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	enum ethernet_hw_caps caps = (enum ethernet_hw_caps)0;

	if (dev_conf->max_link_speed == LINK_1GBIT) {
		if (dev_conf->phy_advertise_lower) {
			caps |= (ETHERNET_LINK_1000BASE_T |
				ETHERNET_LINK_100BASE_T |
				ETHERNET_LINK_10BASE_T);
		} else {
			caps |= ETHERNET_LINK_1000BASE_T;
		}
	} else if (dev_conf->max_link_speed == LINK_100MBIT) {
		if (dev_conf->phy_advertise_lower) {
			caps |= (ETHERNET_LINK_100BASE_T |
				ETHERNET_LINK_10BASE_T);
		} else {
			caps |= ETHERNET_LINK_100BASE_T;
		}
	} else {
		caps |= ETHERNET_LINK_10BASE_T;
	}

	if (dev_conf->enable_rx_chksum_offload) {
		caps |= ETHERNET_HW_RX_CHKSUM_OFFLOAD;
	}

	if (dev_conf->enable_tx_chksum_offload) {
		caps |= ETHERNET_HW_TX_CHKSUM_OFFLOAD;
	}

	if (dev_conf->enable_fdx) {
		caps |= ETHERNET_DUPLEX_SET;
	}

	if (dev_conf->copy_all_frames) {
		caps |= ETHERNET_PROMISC_MODE;
	}

	return caps;
}

#ifdef CONFIG_NET_STATISTICS_ETHERNET
/**
 * @brief GEM statistics data request function
 * Returns a pointer to the statistics data of the current GEM controller.
 *
 * @param dev Pointer to the device data
 * @return Pointer to the current GEM device's statistics data
 */
static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev)
{
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	return &dev_data->stats;
}
#endif

/**
 * @brief GEM Hardware reset function
 * Resets the current GEM device. Called from within the device
 * initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_reset_hw(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	/*
	 * Controller reset sequence as described in the Zynq-7000 TRM,
	 * chapter 16.3.1.
	 */

	/* Clear the NWCTRL register */
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Clear the statistics counters */
	sys_write32(ETH_XLNX_GEM_STATCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Clear the RX/TX status registers */
	sys_write32(ETH_XLNX_GEM_TXSRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(ETH_XLNX_GEM_RXSRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	/* Disable all interrupts */
	sys_write32(ETH_XLNX_GEM_IDRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);

	/* Clear the buffer queues */
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
}

/**
 * @brief GEM clock configuration function
 * Calculates the pre-scalers for the TX clock to match the current
 * (if an associated PHY is managed) or nominal link speed. Called
 * from within the device initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_configure_clocks(const struct device *dev)
{
	/*
	 * Clock source configuration for the respective GEM as described
	 * in the Zynq-7000 TRM, chapter 16.3.3, is not tackled here. This
	 * is performed by the PS7Init code. Only the DIVISOR and DIVISOR1
	 * values for the respective GEM's TX clock are calculated here.
	 */

	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	uint32_t div0;
	uint32_t div1;
	uint32_t target = 2500000; /* default prevents 'may be uninitialized' warning */
	uint32_t tmp;
	uint32_t clk_ctrl_reg;

	if ((!dev_conf->init_phy) || dev_data->eff_link_speed == LINK_DOWN) {
		/*
		 * Run-time data indicates 'link down' or PHY management
		 * is disabled for the current device -> this indicates the
		 * initial device initialization. Once the PHY status polling
		 * delayed work handler has picked up the result of the auto-
		 * negotiation (if enabled), this if-statement will evaluate
		 * to false.
		 */
		if (dev_conf->max_link_speed == LINK_10MBIT) {
			target = 2500000;   /* Target frequency: 2.5 MHz */
		} else if (dev_conf->max_link_speed == LINK_100MBIT) {
			target = 25000000;  /* Target frequency: 25 MHz */
		} else if (dev_conf->max_link_speed == LINK_1GBIT) {
			target = 125000000; /* Target frequency: 125 MHz */
		}
	} else if (dev_data->eff_link_speed != LINK_DOWN) {
		/*
		 * Use the effective link speed instead of the maximum/nominal
		 * link speed for clock configuration.
		 */
		if (dev_data->eff_link_speed == LINK_10MBIT) {
			target = 2500000;   /* Target frequency: 2.5 MHz */
		} else if (dev_data->eff_link_speed == LINK_100MBIT) {
			target = 25000000;  /* Target frequency: 25 MHz */
		} else if (dev_data->eff_link_speed == LINK_1GBIT) {
			target = 125000000; /* Target frequency: 125 MHz */
		}
	}
	/*
	 * Calculate the divisors for the target frequency.
	 * The frequency of the PLL to which the divisors shall be applied
	 * is provided in the respective GEM's device tree data.
	 */
	for (div0 = 1; div0 < 64; div0++) {
		for (div1 = 1; div1 < 64; div1++) {
			tmp = ((dev_conf->pll_clock_frequency / div0) / div1);
			if (tmp >= (target - 10) && tmp <= (target + 10)) {
				break;
			}
		}
		if (tmp >= (target - 10) && tmp <= (target + 10)) {
			break;
		}
	}
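	/*
	 * Worked example for the search above (PLL frequency is a
	 * hypothetical value): assuming a 1 GHz PLL reference and a
	 * 125 MHz target (1 GBit/s link), the first match is div0 = 1,
	 * div1 = 8, since (1000000000 / 1) / 8 = 125000000 Hz lies
	 * within the +/-10 Hz tolerance window.
	 */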

#if defined(CONFIG_SOC_XILINX_ZYNQMP)
	/*
	 * ZynqMP register crl_apb.GEMx_REF_CTRL:
	 * RX_CLKACT bit [26]
	 * CLKACT bit [25]
	 * div0 bits [13..8], div1 bits [21..16]
	 */
	clk_ctrl_reg  = sys_read32(dev_conf->clk_ctrl_reg_address);
	clk_ctrl_reg &= ~((ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) |
			(ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT));
	clk_ctrl_reg |= ((div0 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) |
			((div1 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT);
	clk_ctrl_reg |= ETH_XLNX_CRL_APB_GEMX_REF_CTRL_RX_CLKACT_BIT |
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_CLKACT_BIT;

	/*
	 * Unlock CRL_APB write access if the write protect bit
	 * is currently set, restore it afterwards.
	 */
	tmp = sys_read32(ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) {
		sys_write32((tmp & ~ETH_XLNX_CRL_APB_WPROT_BIT),
			    ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	}
	sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
	if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) {
		sys_write32(tmp, ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	}
#elif defined(CONFIG_SOC_FAMILY_XILINX_ZYNQ7000)
	clk_ctrl_reg  = sys_read32(dev_conf->clk_ctrl_reg_address);
	clk_ctrl_reg &= ~((ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR0_SHIFT) |
			(ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR1_SHIFT));
	clk_ctrl_reg |= ((div0 & ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR0_SHIFT) |
			((div1 & ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR1_SHIFT);

	sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
#endif /* CONFIG_SOC_XILINX_ZYNQMP / CONFIG_SOC_FAMILY_XILINX_ZYNQ7000 */

	LOG_DBG("%s set clock dividers div0/1 %u/%u for target "
		"frequency %u Hz", dev->name, div0, div1, target);
}

/**
 * @brief GEM initial Network Configuration Register setup function
 * Writes the contents of the current GEM device's Network Configuration
 * Register (NWCFG / gem.net_cfg). Called from within the device
 * initialization function. Implementation differs depending on whether
 * the current target is a Zynq-7000 or a ZynqMP.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val = 0;

	if (dev_conf->ignore_ipg_rxer) {
		/* [30]     ignore IPG rx_er */
		reg_val |= ETH_XLNX_GEM_NWCFG_IGNIPGRXERR_BIT;
	}
	if (dev_conf->disable_reject_nsp) {
		/* [29]     disable rejection of non-standard preamble */
		reg_val |= ETH_XLNX_GEM_NWCFG_BADPREAMBEN_BIT;
	}
	if (dev_conf->enable_ipg_stretch) {
		/* [28]     enable IPG stretch */
		reg_val |= ETH_XLNX_GEM_NWCFG_IPG_STRETCH_BIT;
	}
	if (dev_conf->enable_sgmii_mode) {
		/* [27]     SGMII mode enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_SGMIIEN_BIT;
	}
	if (dev_conf->disable_reject_fcs_crc_errors) {
		/* [26]     disable rejection of FCS/CRC errors */
		reg_val |= ETH_XLNX_GEM_NWCFG_FCSIGNORE_BIT;
	}
	if (dev_conf->enable_rx_halfdup_while_tx) {
		/* [25]     RX half duplex while TX enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_HDRXEN_BIT;
	}
	if (dev_conf->enable_rx_chksum_offload) {
		/* [24]     enable RX IP/TCP/UDP checksum offload */
		reg_val |= ETH_XLNX_GEM_NWCFG_RXCHKSUMEN_BIT;
	}
	if (dev_conf->disable_pause_copy) {
		/* [23]     Do not copy pause Frames to memory */
		reg_val |= ETH_XLNX_GEM_NWCFG_PAUSECOPYDI_BIT;
	}
	/* [22..21] Data bus width */
	reg_val |= (((uint32_t)(dev_conf->amba_dbus_width) &
		   ETH_XLNX_GEM_NWCFG_DBUSW_MASK) <<
		   ETH_XLNX_GEM_NWCFG_DBUSW_SHIFT);
	/* [20..18] MDC clock divider */
	reg_val |= (((uint32_t)dev_conf->mdc_divider &
		   ETH_XLNX_GEM_NWCFG_MDC_MASK) <<
		   ETH_XLNX_GEM_NWCFG_MDC_SHIFT);
	if (dev_conf->discard_rx_fcs) {
		/* [17]     Discard FCS from received frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_FCSREM_BIT;
	}
	if (dev_conf->discard_rx_length_errors) {
		/* [16]     RX length error discard */
		reg_val |= ETH_XLNX_GEM_NWCFG_LENGTHERRDSCRD_BIT;
	}
	/* [15..14] RX buffer offset */
	reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_offset &
		   ETH_XLNX_GEM_NWCFG_RXOFFS_MASK) <<
		   ETH_XLNX_GEM_NWCFG_RXOFFS_SHIFT);
	if (dev_conf->enable_pause) {
		/* [13]     Enable pause TX */
		reg_val |= ETH_XLNX_GEM_NWCFG_PAUSEEN_BIT;
	}
	if (dev_conf->enable_tbi) {
		/* [11]     enable TBI instead of GMII/MII */
		reg_val |= ETH_XLNX_GEM_NWCFG_TBIINSTEAD_BIT;
	}
	if (dev_conf->ext_addr_match) {
		/* [09]     External address match enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_EXTADDRMATCHEN_BIT;
	}
	if (dev_conf->enable_1536_frames) {
		/* [08]     Enable 1536 byte frames reception */
		reg_val |= ETH_XLNX_GEM_NWCFG_1536RXEN_BIT;
	}
	if (dev_conf->enable_ucast_hash) {
		/* [07]     Receive unicast hash frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_UCASTHASHEN_BIT;
	}
	if (dev_conf->enable_mcast_hash) {
		/* [06]     Receive multicast hash frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_MCASTHASHEN_BIT;
	}
	if (dev_conf->disable_bcast) {
		/* [05]     Do not receive broadcast frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_BCASTDIS_BIT;
	}
	if (dev_conf->copy_all_frames) {
		/* [04]     Copy all frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_COPYALLEN_BIT;
	}
	if (dev_conf->discard_non_vlan) {
		/* [02]     Receive only VLAN frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_NVLANDISC_BIT;
	}
	if (dev_conf->enable_fdx) {
		/* [01]     enable Full duplex */
		reg_val |= ETH_XLNX_GEM_NWCFG_FDEN_BIT;
	}
	if (dev_conf->max_link_speed == LINK_100MBIT) {
		/* [00]     10 or 100 Mbps */
		reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT;
	} else if (dev_conf->max_link_speed == LINK_1GBIT) {
		/* [10]     Gigabit mode enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT;
	}
	/*
	 * No else-branch for 10Mbit/s mode:
	 * in 10 Mbit/s mode, both bits [00] and [10] remain 0
	 */

	/* Write the assembled register contents to gem.net_cfg */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
}

/**
 * @brief GEM Network Configuration Register link speed update function
 * Updates only the link speed-related bits of the Network Configuration
 * register. This is called from within #eth_xlnx_gem_poll_phy.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	/*
	 * Read the current gem.net_cfg register contents and mask out
	 * the link speed-related bits
	 */
	reg_val  = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
	reg_val &= ~(ETH_XLNX_GEM_NWCFG_1000_BIT | ETH_XLNX_GEM_NWCFG_100_BIT);

	/* No bits to set for 10 Mbps. 100 Mbps and 1 Gbps set one bit each. */
	if (dev_data->eff_link_speed == LINK_100MBIT) {
		reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT;
	} else if (dev_data->eff_link_speed == LINK_1GBIT) {
		reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT;
	}

	/* Write the assembled register contents to gem.net_cfg */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
}

/**
 * @brief GEM MAC address setup function
 * Acquires the MAC address to be assigned to the current GEM device
 * from the device configuration data which in turn acquires it from
 * the device tree data, then writes it to the gem.spec_addr1_bot/LADDR1L
 * and gem.spec_addr1_top/LADDR1H registers. Called from within the device
 * initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_mac_address(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t regval_top;
	uint32_t regval_bot;

	regval_bot  = (dev_data->mac_addr[0] & 0xFF);
	regval_bot |= (dev_data->mac_addr[1] & 0xFF) << 8;
	regval_bot |= (dev_data->mac_addr[2] & 0xFF) << 16;
	regval_bot |= (dev_data->mac_addr[3] & 0xFF) << 24;

	regval_top  = (dev_data->mac_addr[4] & 0xFF);
	regval_top |= (dev_data->mac_addr[5] & 0xFF) << 8;
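	/*
	 * Example of the register layout above: for the (hypothetical)
	 * MAC address 00:0A:35:01:02:03, regval_bot becomes 0x01350A00
	 * (mac_addr[0] in the least significant byte) and regval_top
	 * becomes 0x00000302.
	 */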

	sys_write32(regval_bot, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1L_OFFSET);
	sys_write32(regval_top, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1H_OFFSET);

	LOG_DBG("%s MAC %02X:%02X:%02X:%02X:%02X:%02X",
		dev->name,
		dev_data->mac_addr[0],
		dev_data->mac_addr[1],
		dev_data->mac_addr[2],
		dev_data->mac_addr[3],
		dev_data->mac_addr[4],
		dev_data->mac_addr[5]);
}

/**
 * @brief GEM initial DMA Control Register setup function
 * Writes the contents of the current GEM device's DMA Control Register
 * (DMACR / gem.dma_cfg). Called from within the device initialization
 * function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val = 0;

	/*
	 * gem.dma_cfg register bit (field) definitions:
	 * comp. Zynq-7000 TRM, p. 1278 ff.
	 */

	if (dev_conf->disc_rx_ahb_unavail) {
		/* [24] Discard RX packet when AHB unavailable */
		reg_val |= ETH_XLNX_GEM_DMACR_DISCNOAHB_BIT;
	}
	/*
	 * [23..16] DMA RX buffer size in AHB system memory
	 *    e.g.: 0x02 = 128, 0x18 = 1536, 0xA0 = 10240
	 */
	reg_val |= (((dev_conf->rx_buffer_size / 64) &
		   ETH_XLNX_GEM_DMACR_RX_BUF_MASK) <<
		   ETH_XLNX_GEM_DMACR_RX_BUF_SHIFT);
	if (dev_conf->enable_tx_chksum_offload) {
		/* [11] TX TCP/UDP/IP checksum offload to GEM */
		reg_val |= ETH_XLNX_GEM_DMACR_TCP_CHKSUM_BIT;
	}
	if (dev_conf->tx_buffer_size_full) {
		/* [10] TX buffer memory size select */
		reg_val |= ETH_XLNX_GEM_DMACR_TX_SIZE_BIT;
	}
	/*
	 * [09..08] RX packet buffer memory size select
	 *          0 = 1kB, 1 = 2kB, 2 = 4kB, 3 = 8kB
	 */
	reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_size <<
		   ETH_XLNX_GEM_DMACR_RX_SIZE_SHIFT) &
		   ETH_XLNX_GEM_DMACR_RX_SIZE_MASK);
	if (dev_conf->enable_ahb_packet_endian_swap) {
		/* [07] AHB packet data endian swap enable */
		reg_val |= ETH_XLNX_GEM_DMACR_ENDIAN_BIT;
	}
	if (dev_conf->enable_ahb_md_endian_swap) {
		/* [06] AHB mgmt descriptor endian swap enable */
		reg_val |= ETH_XLNX_GEM_DMACR_DESCR_ENDIAN_BIT;
	}
	/*
	 * [04..00] AHB fixed burst length for DMA ops.
	 *          00001 = single AHB bursts,
	 *          001xx = attempt to use INCR4  bursts,
	 *          01xxx = attempt to use INCR8  bursts,
	 *          1xxxx = attempt to use INCR16 bursts
	 */
	reg_val |= ((uint32_t)dev_conf->ahb_burst_length &
		   ETH_XLNX_GEM_DMACR_AHB_BURST_LENGTH_MASK);
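	/*
	 * Example of an assembled register value, using the bit (field)
	 * positions listed above (the configuration values are assumptions
	 * for the sake of illustration): rx_buffer_size = 1536 (-> 0x18 in
	 * [23..16]), TX checksum offload ([11]) and full TX buffer size
	 * ([10]) enabled, 8 kB RX packet buffer (3 in [09..08]) and INCR16
	 * bursts (0x10 in [04..00]) yield reg_val = 0x00180F10.
	 */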

	/* Write the assembled register contents */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_DMACR_OFFSET);
}

/**
 * @brief GEM associated PHY detection and setup function
 * If the current GEM device shall manage an associated PHY, its detection
 * and configuration is performed from within this function. Called from
 * within the device initialization function. This function refers to
 * functionality implemented in the phy_xlnx_gem module.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_init_phy(const struct device *dev)
{
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	int detect_rc;

	LOG_DBG("%s attempting to initialize associated PHY", dev->name);

	/*
	 * The phy_xlnx_gem_detect function checks if a valid PHY
	 * ID is returned when reading the corresponding high / low
	 * ID registers for all valid MDIO addresses. If a compatible
	 * PHY is detected, the function writes a pointer to the
	 * vendor-specific implementations of the PHY management
	 * functions to the run-time device data struct, along with
	 * the ID and the MDIO address of the detected PHY (dev_data->
	 * phy_id, dev_data->phy_addr, dev_data->phy_access_api).
	 */
	detect_rc = phy_xlnx_gem_detect(dev);

	if (detect_rc == 0 && dev_data->phy_id != 0x00000000 &&
			dev_data->phy_id != 0xFFFFFFFF &&
			dev_data->phy_access_api != NULL) {
		/* A compatible PHY was detected -> reset & configure it */
		dev_data->phy_access_api->phy_reset_func(dev);
		dev_data->phy_access_api->phy_configure_func(dev);
	} else {
		LOG_WRN("%s no compatible PHY detected", dev->name);
	}
}

/**
 * @brief GEM associated PHY status polling function
 * This handler of a delayed work item is called from the context of
 * the system work queue. It is always scheduled at least once during the
 * interface initialization. If the current driver instance manages a
 * PHY, the delayed work item will be re-scheduled in order to continuously
 * monitor the link state and speed while the device is active. Link state
 * and link speed changes are polled, which may result in the link state
 * change being propagated (carrier on/off) and / or the TX clock being
 * reconfigured to match the current link speed. If PHY management is
 * disabled for the current driver instance or no compatible PHY was
 * detected, the work item will not be re-scheduled and default link
 * speed and link state values are applied. This function refers to
 * functionality implemented in the phy_xlnx_gem module.
 *
 * @param work Pointer to the delayed work item which facilitates
 *             access to the current device's configuration data
 */
static void eth_xlnx_gem_poll_phy(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(dwork,
		struct eth_xlnx_gem_dev_data, phy_poll_delayed_work);
	const struct device *dev = net_if_get_device(dev_data->iface);
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	uint16_t phy_status;
	uint8_t link_status;
	if (dev_data->phy_access_api != NULL) {
		/* A supported PHY is managed by the driver */
		phy_status = dev_data->phy_access_api->phy_poll_status_change_func(dev);

		if ((phy_status & (
			PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED |
			PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED |
			PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE)) != 0) {

			/*
			 * Get the PHY's link status. Handling a 'link down'
			 * event is the simplest possible case.
			 */
			link_status = dev_data->phy_access_api->phy_poll_link_status_func(dev);
			if (link_status == 0) {
				/*
				 * Link is down -> propagate to the Ethernet
				 * layer that the link has gone down.
				 */
				dev_data->eff_link_speed = LINK_DOWN;
				net_eth_carrier_off(dev_data->iface);

				LOG_WRN("%s link down", dev->name);
			} else {
				/*
				 * A link has been detected, which, depending
				 * on the driver's configuration, might have
				 * a different speed than the previous link.
				 * Therefore, the clock dividers must be
				 * adjusted accordingly.
				 */
				dev_data->eff_link_speed =
					dev_data->phy_access_api->phy_poll_link_speed_func(dev);

				eth_xlnx_gem_configure_clocks(dev);
				eth_xlnx_gem_set_nwcfg_link_speed(dev);
				net_eth_carrier_on(dev_data->iface);

				LOG_INF("%s link up, %s", dev->name,
					(dev_data->eff_link_speed   == LINK_1GBIT)
					? "1 GBit/s"
					: (dev_data->eff_link_speed == LINK_100MBIT)
					? "100 MBit/s"
					: (dev_data->eff_link_speed == LINK_10MBIT)
					? "10 MBit/s" : "undefined / link down");
			}
		}

		/*
		 * Re-submit the delayed work using the interval from the device
		 * configuration data.
		 */
		k_work_reschedule(&dev_data->phy_poll_delayed_work,
				  K_MSEC(dev_conf->phy_poll_interval));
	} else {
		/*
		 * The current driver instance doesn't manage a PHY or no
		 * supported PHY was detected -> pretend the configured max.
		 * link speed is the effective link speed and that the link
		 * is up. The delayed work item won't be re-scheduled, as
		 * there isn't anything to poll for.
		 */
		dev_data->eff_link_speed = dev_conf->max_link_speed;

		eth_xlnx_gem_configure_clocks(dev);
		eth_xlnx_gem_set_nwcfg_link_speed(dev);
		net_eth_carrier_on(dev_data->iface);

		LOG_WRN("%s PHY not managed by the driver or no compatible "
			"PHY detected, assuming link up at %s", dev->name,
			(dev_conf->max_link_speed == LINK_1GBIT)
			? "1 GBit/s"
			: (dev_conf->max_link_speed == LINK_100MBIT)
			? "100 MBit/s"
			: (dev_conf->max_link_speed == LINK_10MBIT)
			? "10 MBit/s" : "undefined");
	}
}

/**
 * @brief GEM DMA memory area setup function
 * Sets up the DMA memory area to be used by the current GEM device.
 * Called from within the device initialization function or from within
 * the context of the PHY status polling delayed work handler.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_configure_buffers(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	struct eth_xlnx_gem_bd *bdptr;
	uint32_t buf_iter;

	/* Initial configuration of the RX/TX BD rings */
	DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INIT_BD_RING)

	/*
	 * Set initial RX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
	 * "Receive Buffer Descriptor List". The BD ring data other than
	 * the base RX/TX buffer pointers will be set in
	 * eth_xlnx_gem_iface_init()
	 */
	bdptr = dev_data->rxbd_ring.first_bd;

	for (buf_iter = 0; buf_iter < (dev_conf->rxbd_count - 1); buf_iter++) {
		/* Clear 'used' bit -> BD is owned by the controller */
		bdptr->ctrl = 0;
		bdptr->addr = (uint32_t)dev_data->first_rx_buffer +
			      (buf_iter * (uint32_t)dev_conf->rx_buffer_size);
		++bdptr;
	}

	/*
	 * For the last BD, bit [1] must be OR'ed in the buffer memory
	 * address -> this is the 'wrap' bit indicating that this is the
	 * last BD in the ring. This location is used as bits [1..0] can't
	 * be part of the buffer address due to alignment requirements
	 * anyways. Watch out: TX BDs handle this differently, their wrap
	 * bit is located in the BD's control word!
	 */
	bdptr->ctrl = 0; /* BD is owned by the controller */
	bdptr->addr = ((uint32_t)dev_data->first_rx_buffer +
		      (buf_iter * (uint32_t)dev_conf->rx_buffer_size)) |
		      ETH_XLNX_GEM_RXBD_WRAP_BIT;
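	/*
	 * Example of the address/flag packing above: a (hypothetical)
	 * last RX buffer located at 0x00100040 is stored as
	 * 0x00100040 | 0x00000002 = 0x00100042, since the buffer address
	 * is at least word-aligned and bits [1..0] are therefore free to
	 * carry the 'wrap' and 'used' flags.
	 */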

	/*
	 * Set initial TX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
	 * "Transmit Buffer Descriptor List". TX BD ring data has already
	 * been set up in eth_xlnx_gem_iface_init()
	 */
	bdptr = dev_data->txbd_ring.first_bd;

	for (buf_iter = 0; buf_iter < (dev_conf->txbd_count - 1); buf_iter++) {
		/* Set up the control word -> 'used' flag must be set. */
		bdptr->ctrl = ETH_XLNX_GEM_TXBD_USED_BIT;
		bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
			      (buf_iter * (uint32_t)dev_conf->tx_buffer_size);
		++bdptr;
	}

	/*
	 * For the last BD, set the 'wrap' bit indicating to the controller
	 * that this BD is the last one in the ring. -> For TX BDs, the 'wrap'
	 * bit isn't located in the address word, but in the control word
	 * instead
	 */
	bdptr->ctrl = (ETH_XLNX_GEM_TXBD_WRAP_BIT | ETH_XLNX_GEM_TXBD_USED_BIT);
	bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
		      (buf_iter * (uint32_t)dev_conf->tx_buffer_size);

	/* Set free count/current index in the RX/TX BD ring data */
	dev_data->rxbd_ring.next_to_process = 0;
	dev_data->rxbd_ring.next_to_use     = 0;
	dev_data->rxbd_ring.free_bds        = dev_conf->rxbd_count;
	dev_data->txbd_ring.next_to_process = 0;
	dev_data->txbd_ring.next_to_use     = 0;
	dev_data->txbd_ring.free_bds        = dev_conf->txbd_count;

	/* Write pointers to the first RX/TX BD to the controller */
	sys_write32((uint32_t)dev_data->rxbd_ring.first_bd,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
	sys_write32((uint32_t)dev_data->txbd_ring.first_bd,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
}

/**
 * @brief GEM RX data pending handler wrapper for the work queue
 * Wraps the RX data pending handler, eth_xlnx_gem_handle_rx_pending,
 * for the scenario in which the current GEM device is configured
 * to defer RX pending / TX done indication handling to the system
 * work queue. In this case, the work item received by this wrapper
 * function will be enqueued from within the ISR if the corresponding
 * bit is set within the controller's interrupt status register
 * (gem.intr_status).
 *
 * @param item Pointer to the work item enqueued by the ISR which
 *             facilitates access to the current device's data
 */
static void eth_xlnx_gem_rx_pending_work(struct k_work *item)
{
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item,
		struct eth_xlnx_gem_dev_data, rx_pend_work);
	const struct device *dev = net_if_get_device(dev_data->iface);

	eth_xlnx_gem_handle_rx_pending(dev);
}

/**
 * @brief GEM RX data pending handler
 * This handler is called either from within the ISR or from the
 * context of the system work queue whenever the RX data pending bit
 * is set in the controller's interrupt status register (gem.intr_status).
 * No further RX data pending interrupts will be triggered until this
 * handler has been executed, which eventually clears the corresponding
 * interrupt status bit. This function acquires the incoming packet
 * data from the DMA memory area via the RX buffer descriptors and copies
 * the data to a packet which will then be handed over to the network
 * stack.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_handle_rx_pending(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_addr;
	uint32_t reg_ctrl;
	uint32_t reg_val;
	uint32_t reg_val_rxsr;
	uint8_t first_bd_idx;
	uint8_t last_bd_idx;
	uint8_t curr_bd_idx;
	uint32_t rx_data_length;
	uint32_t rx_data_remaining;
	struct net_pkt *pkt;

	/* Read the RX status register */
	reg_val_rxsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	/*
	 * TODO: Evaluate error flags from the RX status register word
	 * here for proper error handling.
	 */

	while (1) {
		curr_bd_idx = dev_data->rxbd_ring.next_to_process;
		first_bd_idx = last_bd_idx = curr_bd_idx;
		reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].addr);
		reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].ctrl);

		/*
		 * Basic precondition checks for the current BD's
		 * address and control words
		 */
		reg_val = sys_read32(reg_addr);
		if ((reg_val & ETH_XLNX_GEM_RXBD_USED_BIT) == 0) {
			/*
			 * No new data contained in the current BD
			 * -> break out of the RX loop
			 */
			break;
		}
		reg_val = sys_read32(reg_ctrl);
		if ((reg_val & ETH_XLNX_GEM_RXBD_START_OF_FRAME_BIT) == 0) {
			/*
			 * Although the current BD is marked as 'used', it
			 * doesn't contain the SOF bit.
			 */
			LOG_ERR("%s unexpected missing SOF bit in RX BD [%u]",
				dev->name, first_bd_idx);
			break;
		}

		/*
		 * As long as the current BD doesn't have the EOF bit set,
		 * iterate forwards until the EOF bit is encountered. Only
		 * the BD containing the EOF bit also contains the length
		 * of the received packet which spans multiple buffers.
		 */
		do {
			reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[last_bd_idx].ctrl);
			reg_val  = sys_read32(reg_ctrl);
			rx_data_length = rx_data_remaining =
					 (reg_val & ETH_XLNX_GEM_RXBD_FRAME_LENGTH_MASK);
			if ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0) {
				last_bd_idx = (last_bd_idx + 1) % dev_conf->rxbd_count;
			}
		} while ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0);
1434 
1435 		/*
1436 		 * Store the position of the first BD behind the end of the
1437 		 * frame currently being processed as 'next to process'
1438 		 */
1439 		dev_data->rxbd_ring.next_to_process = (last_bd_idx + 1) %
1440 						      dev_conf->rxbd_count;
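
		/*
		 * Wrap-around example for the update above (rxbd_count = 32
		 * is an assumed configuration value): with last_bd_idx = 31,
		 * 'next to process' becomes (31 + 1) % 32 = 0, i.e. the ring
		 * restarts at the first descriptor.
		 */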

		/*
		 * Allocate a destination packet from the network stack
		 * now that the total frame length is known.
		 */
		pkt = net_pkt_rx_alloc_with_buffer(dev_data->iface, rx_data_length,
						   AF_UNSPEC, 0, K_NO_WAIT);
		if (pkt == NULL) {
			LOG_ERR("RX packet buffer alloc failed: %u bytes",
				rx_data_length);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
			dev_data->stats.errors.rx++;
			dev_data->stats.error_details.rx_no_buffer_count++;
#endif
		}

		/*
		 * Copy the data from all involved RX buffers into the
		 * allocated packet's data buffer. If we don't have a packet
		 * buffer because none are available, we still have to
		 * iterate over all involved BDs in order to properly release
		 * them for re-use by the controller.
		 */
		do {
			if (pkt != NULL) {
				net_pkt_write(pkt, (const void *)
					      (dev_data->rxbd_ring.first_bd[curr_bd_idx].addr &
					      ETH_XLNX_GEM_RXBD_BUFFER_ADDR_MASK),
					      (rx_data_remaining < dev_conf->rx_buffer_size) ?
					      rx_data_remaining : dev_conf->rx_buffer_size);
			}
			rx_data_remaining -= (rx_data_remaining < dev_conf->rx_buffer_size) ?
					     rx_data_remaining : dev_conf->rx_buffer_size;

			/*
			 * The entire packet data of the current BD has been
			 * processed, on to the next BD -> preserve the RX BD's
			 * 'wrap' bit & address, but clear the 'used' bit.
			 */
			reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[curr_bd_idx].addr);
			reg_val  = sys_read32(reg_addr);
			reg_val &= ~ETH_XLNX_GEM_RXBD_USED_BIT;
			sys_write32(reg_val, reg_addr);

			curr_bd_idx = (curr_bd_idx + 1) % dev_conf->rxbd_count;
		} while (curr_bd_idx != ((last_bd_idx + 1) % dev_conf->rxbd_count));

		/* Propagate the received packet to the network stack */
		if (pkt != NULL) {
			if (net_recv_data(dev_data->iface, pkt) < 0) {
				LOG_ERR("%s RX packet hand-over to IP stack failed",
					dev->name);
				net_pkt_unref(pkt);
			}
#ifdef CONFIG_NET_STATISTICS_ETHERNET
			else {
				dev_data->stats.bytes.received += rx_data_length;
				dev_data->stats.pkts.rx++;
			}
#endif
		}
	}

	/* Clear the RX status register */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
	/* Re-enable the frame received interrupt source */
	sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT,
		    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
}

/**
 * @brief GEM TX done handler wrapper for the work queue
 * Wraps the TX done handler, eth_xlnx_gem_handle_tx_done,
 * for the scenario in which the current GEM device is configured
 * to defer RX pending / TX done indication handling to the system
 * work queue. In this case, the work item received by this wrapper
 * function is enqueued from within the ISR whenever the corresponding
 * bit is set in the controller's interrupt status register
 * (gem.intr_status). A sketch of the ISR-side enqueue follows this
 * function.
 *
 * @param item Pointer to the work item enqueued by the ISR which
 *             facilitates access to the current device's data
 */
static void eth_xlnx_gem_tx_done_work(struct k_work *item)
{
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item,
		struct eth_xlnx_gem_dev_data, tx_done_work);
	const struct device *dev = net_if_get_device(dev_data->iface);

	eth_xlnx_gem_handle_tx_done(dev);
}
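
/*
 * Illustrative sketch only: the TX done counterpart of the RX deferral
 * shown above. The surrounding variables are assumptions for
 * illustration; the defer_txd_to_queue configuration flag is the same
 * one evaluated in eth_xlnx_gem_handle_tx_done below.
 *
 *	if ((reg_isr & ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT) != 0) {
 *		if (dev_conf->defer_txd_to_queue) {
 *			k_work_submit(&dev_data->tx_done_work);
 *		} else {
 *			eth_xlnx_gem_handle_tx_done(dev);
 *		}
 *	}
 */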

/**
 * @brief GEM TX done handler
 * This handler is called either from within the ISR or from the
 * context of the system work queue whenever the TX done bit is set
 * in the controller's interrupt status register (gem.intr_status).
 * No further TX done interrupts will be triggered until this handler
 * has been executed, as it eventually clears the corresponding
 * interrupt status bit. Once this handler reaches the end of its
 * execution, the eth_xlnx_gem_send call which effectively triggered
 * it is unblocked by posting to the current GEM's TX done semaphore
 * on which the send function is blocking. A sketch of this handshake
 * follows the function below.
 *
 * @param dev Pointer to the device
 */
static void eth_xlnx_gem_handle_tx_done(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_ctrl;
	uint32_t reg_val;
	uint32_t reg_val_txsr;
	uint8_t curr_bd_idx;
	uint8_t first_bd_idx;
	uint8_t bds_processed = 0;
	uint8_t bd_is_last;

	/* Read the TX status register */
	reg_val_txsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);

	/*
	 * TODO: Evaluate the error flags from the TX status register word
	 * here for proper error handling.
	 */

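	/*
	 * When TX done handling is deferred to the work queue, the TX BD
	 * ring's bookkeeping data (next_to_process, free_bds) is shared
	 * with the send path running in a different context, hence the
	 * serialization via ring_sem below.
	 */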
	if (dev_conf->defer_txd_to_queue) {
		k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER);
	}

	curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_process;
	reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
	reg_val  = sys_read32(reg_ctrl);

	do {
		++bds_processed;

		/*
		 * TODO: Evaluate the error flags from the current BD's
		 * control word here for proper error handling.
		 */

		/*
		 * Check whether the BD we're currently looking at is the
		 * last BD of the current transmission
		 */
		bd_is_last = ((reg_val & ETH_XLNX_GEM_TXBD_LAST_BIT) != 0) ? 1 : 0;

		/*
		 * Reset the control word of the current BD: clear everything
		 * but the 'wrap' bit, then set the 'used' bit
		 */
		reg_val &= ETH_XLNX_GEM_TXBD_WRAP_BIT;
		reg_val |= ETH_XLNX_GEM_TXBD_USED_BIT;
		sys_write32(reg_val, reg_ctrl);

		/* Move on to the next BD or break out of the loop */
		if (bd_is_last == 1) {
			break;
		}
		curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count;
		reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl);
		reg_val  = sys_read32(reg_ctrl);
	} while (bd_is_last == 0 && curr_bd_idx != first_bd_idx);

	if (curr_bd_idx == first_bd_idx && bd_is_last == 0) {
		LOG_WRN("%s TX done handling wrapped around", dev->name);
	}

	dev_data->txbd_ring.next_to_process =
		(dev_data->txbd_ring.next_to_process + bds_processed) %
		dev_conf->txbd_count;
	dev_data->txbd_ring.free_bds += bds_processed;
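
	/*
	 * Bookkeeping example for the update above (values assumed for
	 * illustration): with txbd_count = 32, next_to_process = 30 and a
	 * 4-BD transmission just completed, next_to_process becomes
	 * (30 + 4) % 32 = 2 and free_bds grows by 4.
	 */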

	if (dev_conf->defer_txd_to_queue) {
		k_sem_give(&(dev_data->txbd_ring.ring_sem));
	}

	/* Clear the TX status register */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);

	/* Re-enable the TX complete interrupt source */
	sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT,
		    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);

	/* Indicate completion to a blocking eth_xlnx_gem_send() call */
	k_sem_give(&dev_data->tx_done_sem);
}
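
/*
 * Handshake sketch referenced in the handler documentation above: a
 * minimal, assumed outline of how eth_xlnx_gem_send blocks until the
 * TX done handler posts tx_done_sem. The timeout value is an
 * assumption for illustration; the actual send implementation is
 * defined earlier in this file.
 *
 *	... fill the TX BDs, then trigger the transmission ...
 *	if (k_sem_take(&dev_data->tx_done_sem, K_MSEC(100)) != 0) {
 *		... TX done indication timed out -> error handling ...
 *	}
 */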